/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/splice.h>
#include <linux/task_io_accounting_ops.h>

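/*
 * Send FUSE_OPEN or FUSE_OPENDIR to the server.  Flags that are
 * meaningless to the server (O_CREAT, O_EXCL, O_NOCTTY) are masked
 * out, and O_TRUNC is passed through only if the server handles
 * truncation atomically at open time.
 */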
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
			  unsigned int open_flags, int opcode,
			  struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;

	if (fm->fc->handle_killpriv_v2 &&
	    (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	if (release) {
		ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
		if (!ff->args) {
			kfree(ff);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&ff->write_entry);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->args);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

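/*
 * Drop a reference to a fuse_file.  When the last reference goes away,
 * send FUSE_RELEASE/FUSE_RELEASEDIR to the server: synchronously if
 * @sync is set, otherwise as a background request that may not fail
 * allocation (__GFP_NOFAIL), falling back to local cleanup if the
 * connection is gone.
 */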
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_release_args *ra = &ff->args->release_args;
		struct fuse_args *args = (ra ? &ra->args : NULL);

		if (ra && ra->inode)
			fuse_file_io_release(ff, ra->inode);

		if (!args) {
			/* Do nothing when server does not implement 'open' */
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}

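/*
 * Open a file or directory on the server.  If the server returns
 * -ENOSYS the open is treated as a no-op ("no-open" mode): sensible
 * defaults are used and further OPEN/OPENDIR requests are suppressed
 * for the lifetime of the connection.
 */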
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
				 unsigned int open_flags, bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	bool open = isdir ? !fc->no_opendir : !fc->no_open;

	ff = fuse_file_alloc(fm, open);
	if (!ff)
		return ERR_PTR(-ENOMEM);

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (open) {
		/* Store outarg for fuse_finish_open() */
		struct fuse_open_out *outargp = &ff->args->open_outarg;
		int err;

		err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
		if (!err) {
			ff->fh = outargp->fh;
			ff->open_flags = outargp->open_flags;
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return ERR_PTR(err);
		} else {
			/* No release needed */
			kfree(ff->args);
			ff->args = NULL;
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;

	return ff;
}

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);

	if (!IS_ERR(ff))
		file->private_data = ff;

	return PTR_ERR_OR_ZERO(ff);
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * The file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

int fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = fuse_file_io_open(file, inode);
	if (err)
		return err;

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);

	return 0;
}

static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	i_size_write(inode, 0);
	spin_unlock(&fi->lock);
	file_update_time(file);
	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
}

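/*
 * ->open() for regular files.  For O_TRUNC opens with atomic_o_trunc,
 * writeback and DAX state must be quiesced under the inode lock before
 * the request is sent, and the page cache truncated afterwards.
 */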
static int fuse_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int err;
	bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
	bool is_wb_truncate = is_truncate && fc->writeback_cache;
	bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate)
		inode_lock(inode);

	if (dax_truncate) {
		filemap_invalidate_lock(inode->i_mapping);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out_inode_unlock;
	}

	if (is_wb_truncate || dax_truncate)
		fuse_set_nowrite(inode);

	err = fuse_do_open(fm, get_node_id(inode), file, false);
	if (!err) {
		ff = file->private_data;
		err = fuse_finish_open(inode, file);
		if (err)
			fuse_sync_release(fi, ff, file->f_flags);
		else if (is_truncate)
			fuse_truncate_update_attr(inode, file);
	}

	if (is_wb_truncate || dax_truncate)
		fuse_release_nowrite(inode);
	if (!err) {
		if (is_truncate)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	if (dax_truncate)
		filemap_invalidate_unlock(inode->i_mapping);
out_inode_unlock:
	if (is_wb_truncate || dax_truncate)
		inode_unlock(inode);

	return err;
}

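/*
 * Detach the file from inode-side bookkeeping (write_files list,
 * polled files tree) and fill in the RELEASE request arguments.  The
 * request itself is sent later, from fuse_file_put().
 */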
static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 unsigned int flags, int opcode, bool sync)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = &ff->args->release_args;

	if (fuse_file_passthrough(ff))
		fuse_passthrough_release(ff, fuse_inode_backing(fi));

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	if (!ra)
		return;

	/* ff->args was used for open outarg */
	memset(ff->args, 0, sizeof(*ff->args));
	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;

	/*
	 * Hold inode until release is finished.
	 * From fuse_sync_release() the refcount is 1 and everything's
	 * synchronous, so we are fine with not doing igrab() here.
	 */
	ra->inode = sync ? NULL : igrab(&fi->inode);
}

void fuse_file_release(struct inode *inode, struct fuse_file *ff,
		       unsigned int open_flags, fl_owner_t id, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_release_args *ra = &ff->args->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_prepare_release(fi, ff, open_flags, opcode, false);

	if (ra && ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
	}

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount:
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy);
}

void fuse_release_common(struct file *file, bool isdir)
{
	fuse_file_release(file_inode(file), file->private_data, file->f_flags,
			  (fl_owner_t) file, isdir);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * Dirty pages might remain despite write_inode_now() call from
	 * fuse_flush() due to writes racing with the close.
	 */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
		       unsigned int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
	fuse_file_put(ff, true);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

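/*
 * Context for one in-flight writeback request.  Primary requests are
 * indexed by page range in the per-inode writepages rbtree; requests
 * covering an overlapping range are chained off the primary via @next.
 */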
struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
	struct fuse_sync_bucket *bucket;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
						pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

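/*
 * ->flush() for close(2).  Flush dirty data and pending writeback
 * first, then send FUSE_FLUSH, passing the scrambled lock owner so the
 * server can drop any per-owner state it keeps.
 */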
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * The in-memory i_blocks is not maintained by fuse; if writeback
	 * cache is enabled, i_blocks from the cached attr may not be
	 * accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes()
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

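/*
 * Fill in the common argument layout for a read request: one
 * fuse_read_in input argument and one variable-sized output argument
 * of at most @count bytes.
 */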
void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		if (ap->args.is_pinned)
			unpin_user_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/*
 * In case of a short read, the caller sets 'pos' to the position of the
 * actual end of the fuse request within the IO request.  Otherwise, if
 * bytes_requested == bytes_transferred or rw == WRITE, the caller sets
 * 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
			      ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}

static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver >= fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * If writeback_cache is enabled, a short read means there's a hole in
	 * the file. Some data after the hole is in page cache, but has not
	 * reached the client fs yet. So the hole is not present there.
	 */
	if (!fc->writeback_cache) {
		loff_t pos = page_offset(ap->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

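/*
 * Read a single page synchronously.  The attr version is sampled
 * before sending the request so that a concurrent size change can
 * invalidate the short-read EOF optimization in fuse_short_read().
 */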
static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	loff_t pos = page_offset(page);
	struct fuse_page_desc desc = { .length = PAGE_SIZE };
	struct fuse_io_args ia = {
		.ap.args.page_zeroing = true,
		.ap.args.out_pages = true,
		.ap.num_pages = 1,
		.ap.pages = &page,
		.ap.descs = &desc,
	};
	ssize_t res;
	u64 attr_ver;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	attr_ver = fuse_get_attr_version(fm->fc);

	/* Don't overflow end offset */
	if (pos + (desc.length - 1) == LLONG_MAX)
		desc.length--;

	fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
	res = fuse_simple_request(fm, &ia.ap.args);
	if (res < 0)
		return res;
	/*
	 * Short read means EOF. If file size is larger, truncate it
	 */
	if (res < desc.length)
		fuse_short_read(inode, attr_ver, res, &ia.ap);

	SetPageUptodate(page);

	return 0;
}

static int fuse_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
			       int err)
{
	int i;
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_args_pages *ap = &ia->ap;
	size_t count = ia->read.in.size;
	size_t num_read = args->out_args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
		mapping = ap->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!err && num_read < count)
			fuse_short_read(inode, ia->read.attr_ver, num_read, ap);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < ap->num_pages; i++) {
		struct folio *folio = page_folio(ap->pages[i]);

		folio_end_read(folio, !err);
		folio_put(folio);
	}
	if (ia->ff)
		fuse_file_put(ia->ff, false);

	fuse_io_free(ia);
}

static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_args_pages *ap = &ia->ap;
	loff_t pos = page_offset(ap->pages[0]);
	size_t count = ap->num_pages << PAGE_SHIFT;
	ssize_t res;
	int err;

	ap->args.out_pages = true;
	ap->args.page_zeroing = true;
	ap->args.page_replace = true;

	/* Don't overflow end offset */
	if (pos + (count - 1) == LLONG_MAX) {
		count--;
		ap->descs[ap->num_pages - 1].length--;
	}
	WARN_ON((loff_t) (pos + count) < 0);

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	ia->read.attr_ver = fuse_get_attr_version(fm->fc);
	if (fm->fc->async_read) {
		ia->ff = fuse_file_get(ff);
		ap->args.end = fuse_readpages_end;
		err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
		if (!err)
			return;
	} else {
		res = fuse_simple_request(fm, &ap->args);
		err = res < 0 ? res : 0;
	}
	fuse_readpages_end(fm, &ap->args, err);
}

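/*
 * ->readahead() entry point.  Batches of up to max_pages pages (also
 * capped by max_read) are pulled off the readahead window and sent as
 * FUSE_READ requests; under congestion the purely speculative tail of
 * the window is dropped.
 */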
static void fuse_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned int i, max_pages, nr_pages = 0;

	if (fuse_is_bad(inode))
		return;

	max_pages = min_t(unsigned int, fc->max_pages,
			  fc->max_read / PAGE_SIZE);

	for (;;) {
		struct fuse_io_args *ia;
		struct fuse_args_pages *ap;

		if (fc->num_background >= fc->congestion_threshold &&
		    rac->ra->async_size >= readahead_count(rac))
			/*
			 * Congested and only async pages left, so skip the
			 * rest.
			 */
			break;

		nr_pages = readahead_count(rac) - nr_pages;
		if (nr_pages > max_pages)
			nr_pages = max_pages;
		if (nr_pages == 0)
			break;
		ia = fuse_io_alloc(NULL, nr_pages);
		if (!ia)
			return;
		ap = &ia->ap;
		nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
		for (i = 0; i < nr_pages; i++) {
			fuse_wait_on_page_writeback(inode,
						    readahead_index(rac) + i);
			ap->descs[i].length = PAGE_SIZE;
		}
		ap->num_pages = nr_pages;
		fuse_send_readpages(ia, rac->file);
	}
}

static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

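/*
 * Fill in the argument layout for FUSE_WRITE: the fuse_write_in header
 * (trimmed for pre-7.9 protocol servers) plus @count bytes of payload
 * in, and a fuse_write_out carrying the byte count actually written.
 */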
static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
				 loff_t pos, size_t count)
{
	struct fuse_args *args = &ia->ap.args;

	ia->write.in.fh = ff->fh;
	ia->write.in.offset = pos;
	ia->write.in.size = count;
	args->opcode = FUSE_WRITE;
	args->nodeid = ff->nodeid;
	args->in_numargs = 2;
	if (ff->fm->fc->minor < 9)
		args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		args->in_args[0].size = sizeof(ia->write.in);
	args->in_args[0].value = &ia->write.in;
	args->in_args[1].size = count;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(ia->write.out);
	args->out_args[0].value = &ia->write.out;
}

static unsigned int fuse_write_flags(struct kiocb *iocb)
{
	unsigned int flags = iocb->ki_filp->f_flags;

	if (iocb_is_dsync(iocb))
		flags |= O_DSYNC;
	if (iocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;

	return flags;
}

static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
			       size_t count, fl_owner_t owner)
{
	struct kiocb *iocb = ia->io->iocb;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	struct fuse_write_in *inarg = &ia->write.in;
	ssize_t err;

	fuse_write_args_fill(ia, ff, pos, count);
	inarg->flags = fuse_write_flags(iocb);
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	err = fuse_simple_request(fm, &ia->ap.args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	return err ?: ia->write.out.size;
}

bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	if (written > 0 && pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fi->lock);

	fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);

	return ret;
}

static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
				     struct kiocb *iocb, struct inode *inode,
				     loff_t pos, size_t count)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;
	unsigned int offset, i;
	bool short_write;
	int err;

	for (i = 0; i < ap->num_pages; i++)
		fuse_wait_on_page_writeback(inode, ap->pages[i]->index);

	fuse_write_args_fill(ia, ff, pos, count);
	ia->write.in.flags = fuse_write_flags(iocb);
	if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
		ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

	err = fuse_simple_request(fm, &ap->args);
	if (!err && ia->write.out.size > count)
		err = -EIO;

	short_write = ia->write.out.size < count;
	offset = ap->descs[0].offset;
	count = ia->write.out.size;
	for (i = 0; i < ap->num_pages; i++) {
		struct page *page = ap->pages[i];

		if (err) {
			ClearPageUptodate(page);
		} else {
			if (count >= PAGE_SIZE - offset)
				count -= PAGE_SIZE - offset;
			else {
				if (short_write)
					ClearPageUptodate(page);
				count = 0;
			}
			offset = 0;
		}
		if (ia->write.page_locked && (i == ap->num_pages - 1))
			unlock_page(page);
		put_page(page);
	}

	return err;
}

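/*
 * Copy data from the iterator into freshly grabbed page-cache pages,
 * stopping at a page boundary, at max_write, or on the first fault.
 * A page that was only partially copied is left locked (recorded in
 * ia->write.page_locked) until the write request has completed.
 */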
static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos,
				     unsigned int max_pages)
{
	struct fuse_args_pages *ap = &ia->ap;
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	ap->args.in_pages = true;
	ap->descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

again:
		err = -EFAULT;
		if (fault_in_iov_iter_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = copy_page_from_iter_atomic(page, offset, bytes, ii);
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			put_page(page);
			goto again;
		}

		err = 0;
		ap->pages[ap->num_pages] = page;
		ap->descs[ap->num_pages].length = tmp;
		ap->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		/* If we copied full page, mark it uptodate */
		if (tmp == PAGE_SIZE)
			SetPageUptodate(page);

		if (PageUptodate(page)) {
			unlock_page(page);
		} else {
			ia->write.page_locked = true;
			break;
		}
		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 ap->num_pages < max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
					 unsigned int max_pages)
{
	return min_t(unsigned int,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     max_pages);
}

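/*
 * Write-through path: fill batches of pages from the iterator and send
 * each batch as a single FUSE_WRITE.  FUSE_I_SIZE_UNSTABLE is set while
 * the write may extend i_size, so that the short-read EOF optimization
 * does not shrink a size that is in flux.
 */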
static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t pos = iocb->ki_pos;
	int err = 0;
	ssize_t res = 0;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		ssize_t count;
		struct fuse_io_args ia = {};
		struct fuse_args_pages *ap = &ia.ap;
		unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
						      fc->max_pages);

		ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
		if (!ap->pages) {
			err = -ENOMEM;
			break;
		}

		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
		if (count <= 0) {
			err = count;
		} else {
			err = fuse_send_write_pages(&ia, iocb, inode,
						    pos, count);
			if (!err) {
				size_t num_written = ia.write.out.size;

				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		kfree(ap->pages);
	} while (!err && iov_iter_count(ii));

	fuse_write_update_attr(inode, pos, res);
	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (!res)
		return err;
	iocb->ki_pos += res;
	return res;
}

static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
}

/*
 * @return true if an exclusive lock for direct IO writes is needed
 */
static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Server side has to advise that it supports parallel dio writes. */
	if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
		return true;

	/*
	 * Append will need to know the eventual EOF - always needs an
	 * exclusive lock.
	 */
	if (iocb->ki_flags & IOCB_APPEND)
		return true;

	/* shared locks are not allowed with parallel page cache IO */
	if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
		return true;

	/* Parallel dio beyond EOF is not supported, at least for now. */
	if (fuse_io_past_eof(iocb, from))
		return true;

	return false;
}

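/*
 * Take the inode lock for a direct IO write: shared if the server
 * supports parallel dio writes and the conditions above hold, else
 * exclusive.  May upgrade to exclusive after rechecking under the
 * shared lock.
 */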
static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
			  bool *exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
	if (*exclusive) {
		inode_lock(inode);
	} else {
		inode_lock_shared(inode);
		/*
		 * New parallel dio is allowed only if the inode is not in
		 * caching mode and it denies new opens in caching mode.
		 * This check should be performed only after taking the
		 * shared inode lock.  The previous past-EOF check was done
		 * without the inode lock and might have raced, so check it
		 * again.
		 */
		if (fuse_io_past_eof(iocb, from) ||
		    fuse_inode_uncached_io_start(fi, NULL) != 0) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			*exclusive = true;
		}
	}
}

static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_inode *fi = get_fuse_inode(inode);

	if (exclusive) {
		inode_unlock(inode);
	} else {
		/* Allow opens in caching mode after last parallel dio end */
		fuse_inode_uncached_io_end(fi);
		inode_unlock_shared(inode);
	}
}

static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err, count;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, file,
					     STATX_SIZE | STATX_MODE);
		if (err)
			return err;

		if (fc->handle_killpriv_v2 &&
		    setattr_should_drop_suidgid(&nop_mnt_idmap,
						file_inode(file))) {
			goto writethrough;
		}

		return generic_file_write_iter(iocb, from);
	}

writethrough:
	inode_lock(inode);

	err = count = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	task_io_account_write(count);

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;
		written = direct_write_fallback(iocb, from, written,
						fuse_perform_write(iocb, from));
	} else {
		written = fuse_perform_write(iocb, from);
	}
out:
	inode_unlock(inode);
	if (written > 0)
		written = generic_write_sync(iocb, written);

	return written ? written : err;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

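/*
 * Pack user (or kernel) buffer pages into the request.  Kvec iterators
 * are passed through as a direct pointer; everything else is extracted
 * page by page with iov_iter_extract_pages() and released again in
 * fuse_release_user_pages().
 */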
static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
			       size_t *nbytesp, int write,
			       unsigned int max_pages)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (iov_iter_is_kvec(ii)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			ap->args.in_args[1].value = (void *) user_addr;
		else
			ap->args.out_args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
		unsigned npages;
		size_t start;
		struct page **pt_pages;

		pt_pages = &ap->pages[ap->num_pages];
		ret = iov_iter_extract_pages(ii, &pt_pages,
					     *nbytesp - nbytes,
					     max_pages - ap->num_pages,
					     0, &start);
		if (ret < 0)
			break;

		nbytes += ret;

		ret += start;
		npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		ap->descs[ap->num_pages].offset = start;
		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);

		ap->num_pages += npages;
		ap->descs[ap->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	ap->args.is_pinned = iov_iter_extract_will_pin(ii);
	ap->args.user_pages = true;
	if (write)
		ap->args.in_pages = true;
	else
		ap->args.out_pages = true;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}

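/*
 * Core of the direct IO path, shared with CUSE.  The iterator is split
 * into requests of at most max_read/max_write bytes; in async mode the
 * requests are submitted in the background and completion is collected
 * by fuse_aio_complete().
 */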
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fm->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	int err = 0;
	struct fuse_io_args *ia;
	unsigned int max_pages;
	bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;

	max_pages = iov_iter_npages(iter, fc->max_pages);
	ia = fuse_io_alloc(io, max_pages);
	if (!ia)
		return -ENOMEM;

	if (fopen_direct_io && fc->direct_io_allow_mmap) {
		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}
	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	if (fopen_direct_io && write) {
		res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
		if (res) {
			fuse_io_free(ia);
			return res;
		}
	}

	io->should_dirty = !write && user_backed_iter(iter);
	while (count) {
		ssize_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);

		err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
					  max_pages);
		if (err && !nbytes)
			break;

		if (write) {
			if (!capable(CAP_FSETID))
				ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;

			nres = fuse_send_write(ia, pos, nbytes, owner);
		} else {
			nres = fuse_send_read(ia, pos, nbytes, owner);
		}

		if (!io->async || nres < 0) {
			fuse_release_user_pages(&ia->ap, io->should_dirty);
			fuse_io_free(ia);
		}
		ia = NULL;
		if (nres < 0) {
			iov_iter_revert(iter, nbytes);
			err = nres;
			break;
		}
		WARN_ON(nres > nbytes);

		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes) {
			iov_iter_revert(iter, nbytes - nres);
			break;
		}
		if (count) {
			max_pages = iov_iter_npages(iter, fc->max_pages);
			ia = fuse_io_alloc(io, max_pages);
			if (!ia)
				break;
		}
	}
	if (ia)
		fuse_io_free(ia);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file_inode(io->iocb->ki_filp);

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_atime(inode);

	return res;
}

static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t res;

	if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
		res = fuse_direct_IO(iocb, to);
	} else {
		struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);

		res = __fuse_direct_read(&io, to, &iocb->ki_pos);
	}

	return res;
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t res;
	bool exclusive;

	fuse_dio_lock(iocb, from, &exclusive);
	res = generic_write_checks(iocb, from);
	if (res > 0) {
		task_io_account_write(res);
		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
			res = fuse_direct_IO(iocb, from);
		} else {
			res = fuse_direct_io(&io, from, &iocb->ki_pos,
					     FUSE_DIO_WRITE);
			fuse_write_update_attr(inode, iocb->ki_pos, res);
		}
	}
	fuse_dio_unlock(iocb, exclusive);

	return res;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_read_iter(iocb, to);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_read_iter(iocb, to);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_read_iter(iocb, to);
	else
		return fuse_cache_read_iter(iocb, to);
}

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);

	if (fuse_is_bad(inode))
		return -EIO;

	if (FUSE_IS_DAX(inode))
		return fuse_dax_write_iter(iocb, from);

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return fuse_direct_write_iter(iocb, from);
	else if (fuse_file_passthrough(ff))
		return fuse_passthrough_write_iter(iocb, from);
	else
		return fuse_cache_write_iter(iocb, from);
}

static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct fuse_file *ff = in->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
	else
		return filemap_splice_read(in, ppos, pipe, len, flags);
}

static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct fuse_file *ff = out->private_data;

	/* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
	if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
		return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
	else
		return iter_file_splice_write(pipe, out, ppos, len, flags);
}

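/*
 * Writeback machinery.  Dirty data is written from temporary pages
 * (accounted as NR_WRITEBACK_TEMP), which keeps memory reclaim from
 * blocking on the userspace server; the helpers below undo that
 * accounting when a request completes or is dropped.
 */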
static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	int i;

	if (wpa->bucket)
		fuse_sync_bucket_dec(wpa->bucket);

	for (i = 0; i < ap->num_pages; i++)
		__free_page(ap->pages[i]);

	if (wpa->ia.ff)
		fuse_file_put(wpa->ia.ff, false);

	kfree(ap->pages);
	kfree(wpa);
}

static void fuse_writepage_finish(struct fuse_mount *fm,
				  struct fuse_writepage_args *wpa)
{
	struct fuse_args_pages *ap = &wpa->ia.ap;
	struct inode *inode = wpa->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	for (i = 0; i < ap->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_mount *fm,
				struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
	struct fuse_writepage_args *aux, *next;
	struct fuse_inode *fi = get_fuse_inode(wpa->inode);
	struct fuse_write_in *inarg = &wpa->ia.write.in;
	struct fuse_args *args = &wpa->ia.ap.args;
	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
	int err;

	fi->writectr++;
	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	args->in_args[1].size = inarg->size;
	args->force = true;
	args->nocreds = true;

	err = fuse_simple_background(fm, args, GFP_ATOMIC);
	if (err == -ENOMEM) {
		spin_unlock(&fi->lock);
		err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
		spin_lock(&fi->lock);
	}

	/* Fails on broken connection only */
	if (unlikely(err))
		goto out_free;

	return;

out_free:
	fi->writectr--;
	rb_erase(&wpa->writepages_entry, &fi->writepages);
	fuse_writepage_finish(fm, wpa);
	spin_unlock(&fi->lock);

	/* After rb_erase() the aux request list is private */
	for (aux = wpa->next; aux; aux = next) {
		struct backing_dev_info *bdi = inode_to_bdi(aux->inode);

		next = aux->next;
		aux->next = NULL;

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(aux);
	}

	fuse_writepage_free(wpa);
	spin_lock(&fi->lock);
}

1852 /*
1853 * If fi->writectr is positive (no truncate or fsync going on) send
1854 * all queued writepage requests.
1855 *
1856 * Called with fi->lock
1857 */
fuse_flush_writepages(struct inode * inode)1858 void fuse_flush_writepages(struct inode *inode)
1859 __releases(fi->lock)
1860 __acquires(fi->lock)
1861 {
1862 struct fuse_mount *fm = get_fuse_mount(inode);
1863 struct fuse_inode *fi = get_fuse_inode(inode);
1864 loff_t crop = i_size_read(inode);
1865 struct fuse_writepage_args *wpa;
1866
1867 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1868 wpa = list_entry(fi->queued_writes.next,
1869 struct fuse_writepage_args, queue_entry);
1870 list_del_init(&wpa->queue_entry);
1871 fuse_send_writepage(fm, wpa, crop);
1872 }
1873 }
1874
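/*
 * Insert a writepage request into the inode's writepages rb_tree, keyed by
 * the range of page indexes it covers.  If an existing request already
 * overlaps that range, nothing is inserted and the overlapping request is
 * returned instead; on successful insertion NULL is returned.
 */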
1875 static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
1876 struct fuse_writepage_args *wpa)
1877 {
1878 pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
1879 pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
1880 struct rb_node **p = &root->rb_node;
1881 struct rb_node *parent = NULL;
1882
1883 WARN_ON(!wpa->ia.ap.num_pages);
1884 while (*p) {
1885 struct fuse_writepage_args *curr;
1886 pgoff_t curr_index;
1887
1888 parent = *p;
1889 curr = rb_entry(parent, struct fuse_writepage_args,
1890 writepages_entry);
1891 WARN_ON(curr->inode != wpa->inode);
1892 curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
1893
1894 if (idx_from >= curr_index + curr->ia.ap.num_pages)
1895 p = &(*p)->rb_right;
1896 else if (idx_to < curr_index)
1897 p = &(*p)->rb_left;
1898 else
1899 return curr;
1900 }
1901
1902 rb_link_node(&wpa->writepages_entry, parent, p);
1903 rb_insert_color(&wpa->writepages_entry, root);
1904 return NULL;
1905 }
1906
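/* Insert a request that the caller guarantees does not overlap anything. */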
1907 static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
1908 {
1909 WARN_ON(fuse_insert_writeback(root, wpa));
1910 }
1911
1912 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
1913 int error)
1914 {
1915 struct fuse_writepage_args *wpa =
1916 container_of(args, typeof(*wpa), ia.ap.args);
1917 struct inode *inode = wpa->inode;
1918 struct fuse_inode *fi = get_fuse_inode(inode);
1919 struct fuse_conn *fc = get_fuse_conn(inode);
1920
1921 mapping_set_error(inode->i_mapping, error);
1922 /*
1923 	 * A writeback finished and this might have updated mtime/ctime on
1924 	 * the server, making local mtime/ctime stale.  Hence invalidate attrs.
1925 * Do this only if writeback_cache is not enabled. If writeback_cache
1926 * is enabled, we trust local ctime/mtime.
1927 */
1928 if (!fc->writeback_cache)
1929 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
1930 spin_lock(&fi->lock);
1931 rb_erase(&wpa->writepages_entry, &fi->writepages);
1932 while (wpa->next) {
1933 struct fuse_mount *fm = get_fuse_mount(inode);
1934 struct fuse_write_in *inarg = &wpa->ia.write.in;
1935 struct fuse_writepage_args *next = wpa->next;
1936
1937 wpa->next = next->next;
1938 next->next = NULL;
1939 next->ia.ff = fuse_file_get(wpa->ia.ff);
1940 tree_insert(&fi->writepages, next);
1941
1942 /*
1943 * Skip fuse_flush_writepages() to make it easy to crop requests
1944 * based on primary request size.
1945 *
1946 * 1st case (trivial): there are no concurrent activities using
1947 * fuse_set/release_nowrite. Then we're on safe side because
1948 * fuse_flush_writepages() would call fuse_send_writepage()
1949 * anyway.
1950 *
1951 * 2nd case: someone called fuse_set_nowrite and it is waiting
1952 * now for completion of all in-flight requests. This happens
1953 * rarely and no more than once per page, so this should be
1954 * okay.
1955 *
1956 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
1957 * of fuse_set_nowrite..fuse_release_nowrite section. The fact
1958 * that fuse_set_nowrite returned implies that all in-flight
1959 * requests were completed along with all of their secondary
1960 * requests. Further primary requests are blocked by negative
1961 * writectr. Hence there cannot be any in-flight requests and
1962 * no invocations of fuse_writepage_end() while we're in
1963 * fuse_set_nowrite..fuse_release_nowrite section.
1964 */
1965 fuse_send_writepage(fm, next, inarg->offset + inarg->size);
1966 }
1967 fi->writectr--;
1968 fuse_writepage_finish(fm, wpa);
1969 spin_unlock(&fi->lock);
1970 fuse_writepage_free(wpa);
1971 }
1972
1973 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
1974 {
1975 struct fuse_file *ff;
1976
1977 spin_lock(&fi->lock);
1978 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
1979 write_entry);
1980 if (ff)
1981 fuse_file_get(ff);
1982 spin_unlock(&fi->lock);
1983
1984 return ff;
1985 }
1986
1987 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
1988 {
1989 struct fuse_file *ff = __fuse_write_file_get(fi);
1990 WARN_ON(!ff);
1991 return ff;
1992 }
1993
1994 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1995 {
1996 struct fuse_inode *fi = get_fuse_inode(inode);
1997 struct fuse_file *ff;
1998 int err;
1999
2000 /*
2001 * Inode is always written before the last reference is dropped and
2002 * hence this should not be reached from reclaim.
2003 *
2004 * Writing back the inode from reclaim can deadlock if the request
2005 * processing itself needs an allocation. Allocations triggering
2006 * reclaim while serving a request can't be prevented, because it can
2007 * involve any number of unrelated userspace processes.
2008 */
2009 WARN_ON(wbc->for_reclaim);
2010
2011 ff = __fuse_write_file_get(fi);
2012 err = fuse_flush_times(inode, ff);
2013 if (ff)
2014 fuse_file_put(ff, false);
2015
2016 return err;
2017 }
2018
2019 static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
2020 {
2021 struct fuse_writepage_args *wpa;
2022 struct fuse_args_pages *ap;
2023
2024 wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
2025 if (wpa) {
2026 ap = &wpa->ia.ap;
2027 ap->num_pages = 0;
2028 ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
2029 if (!ap->pages) {
2030 kfree(wpa);
2031 wpa = NULL;
2032 }
2033 }
2034 	return wpa;
2036 }
2037
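/*
 * Account this request against the current syncfs bucket.  A bucket whose
 * count has already dropped to zero is being waited out by syncfs, so
 * atomic_inc_not_zero() fails on it and the lookup is retried, presumably
 * picking up the replacement bucket installed by syncfs.
 */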
2038 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
2039 struct fuse_writepage_args *wpa)
2040 {
2041 if (!fc->sync_fs)
2042 return;
2043
2044 rcu_read_lock();
2045 /* Prevent resurrection of dead bucket in unlikely race with syncfs */
2046 do {
2047 wpa->bucket = rcu_dereference(fc->curr_bucket);
2048 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
2049 rcu_read_unlock();
2050 }
2051
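/*
 * Write a single page via a temporary page, so that the original folio can
 * leave writeback as soon as the request is queued: the copy, not the folio
 * itself, is what gets handed to the userspace server.  This keeps an
 * unprivileged (and possibly unresponsive) server from pinning the page.
 */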
2052 static int fuse_writepage_locked(struct folio *folio)
2053 {
2054 struct address_space *mapping = folio->mapping;
2055 struct inode *inode = mapping->host;
2056 struct fuse_conn *fc = get_fuse_conn(inode);
2057 struct fuse_inode *fi = get_fuse_inode(inode);
2058 struct fuse_writepage_args *wpa;
2059 struct fuse_args_pages *ap;
2060 struct folio *tmp_folio;
2061 int error = -ENOMEM;
2062
2063 folio_start_writeback(folio);
2064
2065 wpa = fuse_writepage_args_alloc();
2066 if (!wpa)
2067 goto err;
2068 ap = &wpa->ia.ap;
2069
2070 tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
2071 if (!tmp_folio)
2072 goto err_free;
2073
2074 error = -EIO;
2075 wpa->ia.ff = fuse_write_file_get(fi);
2076 if (!wpa->ia.ff)
2077 goto err_nofile;
2078
2079 fuse_writepage_add_to_bucket(fc, wpa);
2080 fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);
2081
2082 folio_copy(tmp_folio, folio);
2083 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2084 wpa->next = NULL;
2085 ap->args.in_pages = true;
2086 ap->num_pages = 1;
2087 ap->pages[0] = &tmp_folio->page;
2088 ap->descs[0].offset = 0;
2089 ap->descs[0].length = PAGE_SIZE;
2090 ap->args.end = fuse_writepage_end;
2091 wpa->inode = inode;
2092
2093 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
2094 node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
2095
2096 spin_lock(&fi->lock);
2097 tree_insert(&fi->writepages, wpa);
2098 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
2099 fuse_flush_writepages(inode);
2100 spin_unlock(&fi->lock);
2101
2102 folio_end_writeback(folio);
2103
2104 return 0;
2105
2106 err_nofile:
2107 folio_put(tmp_folio);
2108  err_free:
	/* also free the page array allocated by fuse_writepage_args_alloc() */
	kfree(wpa->ia.ap.pages);
2109 	kfree(wpa);
2110 err:
2111 mapping_set_error(folio->mapping, error);
2112 folio_end_writeback(folio);
2113 return error;
2114 }
2115
2116 struct fuse_fill_wb_data {
2117 struct fuse_writepage_args *wpa;
2118 struct fuse_file *ff;
2119 struct inode *inode;
2120 struct page **orig_pages;
2121 unsigned int max_pages;
2122 };
2123
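/*
 * Grow the request's page array geometrically: double the current size,
 * but start at least at FUSE_DEFAULT_MAX_PAGES_PER_REQ and never exceed
 * fc->max_pages.
 */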
2124 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
2125 {
2126 struct fuse_args_pages *ap = &data->wpa->ia.ap;
2127 struct fuse_conn *fc = get_fuse_conn(data->inode);
2128 struct page **pages;
2129 struct fuse_page_desc *descs;
2130 unsigned int npages = min_t(unsigned int,
2131 max_t(unsigned int, data->max_pages * 2,
2132 FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2133 fc->max_pages);
2134 WARN_ON(npages <= data->max_pages);
2135
2136 pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
2137 if (!pages)
2138 return false;
2139
2140 memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
2141 memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
2142 kfree(ap->pages);
2143 ap->pages = pages;
2144 ap->descs = descs;
2145 data->max_pages = npages;
2146
2147 return true;
2148 }
2149
2150 static void fuse_writepages_send(struct fuse_fill_wb_data *data)
2151 {
2152 struct fuse_writepage_args *wpa = data->wpa;
2153 struct inode *inode = data->inode;
2154 struct fuse_inode *fi = get_fuse_inode(inode);
2155 int num_pages = wpa->ia.ap.num_pages;
2156 int i;
2157
2158 wpa->ia.ff = fuse_file_get(data->ff);
2159 spin_lock(&fi->lock);
2160 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
2161 fuse_flush_writepages(inode);
2162 spin_unlock(&fi->lock);
2163
2164 for (i = 0; i < num_pages; i++)
2165 end_page_writeback(data->orig_pages[i]);
2166 }
2167
2168 /*
2169 * Check under fi->lock if the page is under writeback, and insert it onto the
2170  * rb_tree if not. Otherwise iterate auxiliary write requests to see if there's
2171 * one already added for a page at this offset. If there's none, then insert
2172 * this new request onto the auxiliary list, otherwise reuse the existing one by
2173 * swapping the new temp page with the old one.
2174 */
2175 static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
2176 struct page *page)
2177 {
2178 struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
2179 struct fuse_writepage_args *tmp;
2180 struct fuse_writepage_args *old_wpa;
2181 struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
2182
2183 WARN_ON(new_ap->num_pages != 0);
2184 new_ap->num_pages = 1;
2185
2186 spin_lock(&fi->lock);
2187 old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
2188 if (!old_wpa) {
2189 spin_unlock(&fi->lock);
2190 return true;
2191 }
2192
2193 for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
2194 pgoff_t curr_index;
2195
2196 WARN_ON(tmp->inode != new_wpa->inode);
2197 curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
2198 if (curr_index == page->index) {
2199 WARN_ON(tmp->ia.ap.num_pages != 1);
2200 swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
2201 break;
2202 }
2203 }
2204
2205 if (!tmp) {
2206 new_wpa->next = old_wpa->next;
2207 old_wpa->next = new_wpa;
2208 }
2209
2210 spin_unlock(&fi->lock);
2211
2212 if (tmp) {
2213 struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
2214
2215 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
2216 dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
2217 wb_writeout_inc(&bdi->wb);
2218 fuse_writepage_free(new_wpa);
2219 }
2220
2221 return false;
2222 }
2223
2224 static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
2225 struct fuse_args_pages *ap,
2226 struct fuse_fill_wb_data *data)
2227 {
2228 WARN_ON(!ap->num_pages);
2229
2230 /*
2231 * Being under writeback is unlikely but possible. For example direct
2232 * read to an mmaped fuse file will set the page dirty twice; once when
2233 * the pages are faulted with get_user_pages(), and then after the read
2234  * completes.
2235 */
2236 if (fuse_page_is_writeback(data->inode, page->index))
2237 return true;
2238
2239 /* Reached max pages */
2240 if (ap->num_pages == fc->max_pages)
2241 return true;
2242
2243 /* Reached max write bytes */
2244 if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
2245 return true;
2246
2247 /* Discontinuity */
2248 if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
2249 return true;
2250
2251 /* Need to grow the pages array? If so, did the expansion fail? */
2252 if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
2253 return true;
2254
2255 return false;
2256 }
2257
2258 static int fuse_writepages_fill(struct folio *folio,
2259 struct writeback_control *wbc, void *_data)
2260 {
2261 struct fuse_fill_wb_data *data = _data;
2262 struct fuse_writepage_args *wpa = data->wpa;
2263 struct fuse_args_pages *ap = &wpa->ia.ap;
2264 struct inode *inode = data->inode;
2265 struct fuse_inode *fi = get_fuse_inode(inode);
2266 struct fuse_conn *fc = get_fuse_conn(inode);
2267 struct page *tmp_page;
2268 int err;
2269
2270 if (!data->ff) {
2271 err = -EIO;
2272 data->ff = fuse_write_file_get(fi);
2273 if (!data->ff)
2274 goto out_unlock;
2275 }
2276
2277 if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
2278 fuse_writepages_send(data);
2279 data->wpa = NULL;
2280 }
2281
2282 err = -ENOMEM;
2283 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2284 if (!tmp_page)
2285 goto out_unlock;
2286
2287 /*
2288 * The page must not be redirtied until the writeout is completed
2289 * (i.e. userspace has sent a reply to the write request). Otherwise
2290 * there could be more than one temporary page instance for each real
2291 * page.
2292 *
2293 * This is ensured by holding the page lock in page_mkwrite() while
2294 * checking fuse_page_is_writeback(). We already hold the page lock
2295 * since clear_page_dirty_for_io() and keep it held until we add the
2296 * request to the fi->writepages list and increment ap->num_pages.
2297 * After this fuse_page_is_writeback() will indicate that the page is
2298 * under writeback, so we can release the page lock.
2299 */
2300 if (data->wpa == NULL) {
2301 err = -ENOMEM;
2302 wpa = fuse_writepage_args_alloc();
2303 if (!wpa) {
2304 __free_page(tmp_page);
2305 goto out_unlock;
2306 }
2307 fuse_writepage_add_to_bucket(fc, wpa);
2308
2309 data->max_pages = 1;
2310
2311 ap = &wpa->ia.ap;
2312 fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
2313 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2314 wpa->next = NULL;
2315 ap->args.in_pages = true;
2316 ap->args.end = fuse_writepage_end;
2317 ap->num_pages = 0;
2318 wpa->inode = inode;
2319 }
2320 folio_start_writeback(folio);
2321
2322 copy_highpage(tmp_page, &folio->page);
2323 ap->pages[ap->num_pages] = tmp_page;
2324 ap->descs[ap->num_pages].offset = 0;
2325 ap->descs[ap->num_pages].length = PAGE_SIZE;
2326 data->orig_pages[ap->num_pages] = &folio->page;
2327
2328 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
2329 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
2330
2331 err = 0;
2332 if (data->wpa) {
2333 /*
2334 * Protected by fi->lock against concurrent access by
2335 * fuse_page_is_writeback().
2336 */
2337 spin_lock(&fi->lock);
2338 ap->num_pages++;
2339 spin_unlock(&fi->lock);
2340 } else if (fuse_writepage_add(wpa, &folio->page)) {
2341 data->wpa = wpa;
2342 } else {
2343 folio_end_writeback(folio);
2344 }
2345 out_unlock:
2346 folio_unlock(folio);
2347
2348 return err;
2349 }
2350
2351 static int fuse_writepages(struct address_space *mapping,
2352 struct writeback_control *wbc)
2353 {
2354 struct inode *inode = mapping->host;
2355 struct fuse_conn *fc = get_fuse_conn(inode);
2356 struct fuse_fill_wb_data data;
2357 int err;
2358
2359 err = -EIO;
2360 if (fuse_is_bad(inode))
2361 goto out;
2362
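	/* Skip background writeback while the connection is congested */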
2363 if (wbc->sync_mode == WB_SYNC_NONE &&
2364 fc->num_background >= fc->congestion_threshold)
2365 return 0;
2366
2367 data.inode = inode;
2368 data.wpa = NULL;
2369 data.ff = NULL;
2370
2371 err = -ENOMEM;
2372 data.orig_pages = kcalloc(fc->max_pages,
2373 sizeof(struct page *),
2374 GFP_NOFS);
2375 if (!data.orig_pages)
2376 goto out;
2377
2378 err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
2379 if (data.wpa) {
2380 WARN_ON(!data.wpa->ia.ap.num_pages);
2381 fuse_writepages_send(&data);
2382 }
2383 if (data.ff)
2384 fuse_file_put(data.ff, false);
2385
2386 kfree(data.orig_pages);
2387 out:
2388 return err;
2389 }
2390
2391 /*
2392  * It would be worthwhile to make sure that space is reserved on disk
2393  * for the write, but doing so without killing performance needs more thought.
2394 */
2395 static int fuse_write_begin(struct file *file, struct address_space *mapping,
2396 loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
2397 {
2398 pgoff_t index = pos >> PAGE_SHIFT;
2399 struct fuse_conn *fc = get_fuse_conn(file_inode(file));
2400 struct folio *folio;
2401 loff_t fsize;
2402 int err = -ENOMEM;
2403
2404 WARN_ON(!fc->writeback_cache);
2405
2406 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2407 mapping_gfp_mask(mapping));
2408 if (IS_ERR(folio))
2409 goto error;
2410
2411 fuse_wait_on_page_writeback(mapping->host, folio->index);
2412
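	/*
	 * Reading the folio in can be skipped if it's already uptodate or if
	 * the write covers it completely; any unwritten tail is zeroed later
	 * in fuse_write_end().
	 */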
2413 if (folio_test_uptodate(folio) || len >= folio_size(folio))
2414 goto success;
2415 /*
2416 * Check if the start of this folio comes after the end of file,
2417 * in which case the readpage can be optimized away.
2418 */
2419 fsize = i_size_read(mapping->host);
2420 if (fsize <= folio_pos(folio)) {
2421 size_t off = offset_in_folio(folio, pos);
2422 if (off)
2423 folio_zero_segment(folio, 0, off);
2424 goto success;
2425 }
2426 err = fuse_do_readpage(file, &folio->page);
2427 if (err)
2428 goto cleanup;
2429 success:
2430 *foliop = folio;
2431 return 0;
2432
2433 cleanup:
2434 folio_unlock(folio);
2435 folio_put(folio);
2436 error:
2437 return err;
2438 }
2439
2440 static int fuse_write_end(struct file *file, struct address_space *mapping,
2441 loff_t pos, unsigned len, unsigned copied,
2442 struct folio *folio, void *fsdata)
2443 {
2444 struct inode *inode = folio->mapping->host;
2445
2446 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
2447 if (!copied)
2448 goto unlock;
2449
2450 pos += copied;
2451 if (!folio_test_uptodate(folio)) {
2452 /* Zero any unwritten bytes at the end of the page */
2453 size_t endoff = pos & ~PAGE_MASK;
2454 if (endoff)
2455 folio_zero_segment(folio, endoff, PAGE_SIZE);
2456 folio_mark_uptodate(folio);
2457 }
2458
2459 if (pos > inode->i_size)
2460 i_size_write(inode, pos);
2461
2462 folio_mark_dirty(folio);
2463
2464 unlock:
2465 folio_unlock(folio);
2466 folio_put(folio);
2467
2468 return copied;
2469 }
2470
2471 static int fuse_launder_folio(struct folio *folio)
2472 {
2473 int err = 0;
2474 if (folio_clear_dirty_for_io(folio)) {
2475 struct inode *inode = folio->mapping->host;
2476
2477 /* Serialize with pending writeback for the same page */
2478 fuse_wait_on_page_writeback(inode, folio->index);
2479 err = fuse_writepage_locked(folio);
2480 if (!err)
2481 fuse_wait_on_page_writeback(inode, folio->index);
2482 }
2483 return err;
2484 }
2485
2486 /*
2487 * Write back dirty data/metadata now (there may not be any suitable
2488 * open files later for data)
2489 */
2490 static void fuse_vma_close(struct vm_area_struct *vma)
2491 {
2492 int err;
2493
2494 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2495 mapping_set_error(vma->vm_file->f_mapping, err);
2496 }
2497
2498 /*
2499 * Wait for writeback against this page to complete before allowing it
2500 * to be marked dirty again, and hence written back again, possibly
2501 * before the previous writepage completed.
2502 *
2503 * Block here, instead of in ->writepage(), so that the userspace fs
2504 * can only block processes actually operating on the filesystem.
2505 *
2506  * Otherwise an unprivileged userspace fs would be able to block
2507  * unrelated operations, such as:
2508 *
2509 * - page migration
2510 * - sync(2)
2511 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2512 */
2513 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
2514 {
2515 struct page *page = vmf->page;
2516 struct inode *inode = file_inode(vmf->vma->vm_file);
2517
2518 file_update_time(vmf->vma->vm_file);
2519 lock_page(page);
2520 if (page->mapping != inode->i_mapping) {
2521 unlock_page(page);
2522 return VM_FAULT_NOPAGE;
2523 }
2524
2525 fuse_wait_on_page_writeback(inode, page->index);
2526 return VM_FAULT_LOCKED;
2527 }
2528
2529 static const struct vm_operations_struct fuse_file_vm_ops = {
2530 .close = fuse_vma_close,
2531 .fault = filemap_fault,
2532 .map_pages = filemap_map_pages,
2533 .page_mkwrite = fuse_page_mkwrite,
2534 };
2535
2536 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2537 {
2538 struct fuse_file *ff = file->private_data;
2539 struct fuse_conn *fc = ff->fm->fc;
2540 struct inode *inode = file_inode(file);
2541 int rc;
2542
2543 /* DAX mmap is superior to direct_io mmap */
2544 if (FUSE_IS_DAX(inode))
2545 return fuse_dax_mmap(file, vma);
2546
2547 /*
2548 * If inode is in passthrough io mode, because it has some file open
2549 * in passthrough mode, either mmap to backing file or fail mmap,
2550 * because mixing cached mmap and passthrough io mode is not allowed.
2551 */
2552 if (fuse_file_passthrough(ff))
2553 return fuse_passthrough_mmap(file, vma);
2554 else if (fuse_inode_backing(get_fuse_inode(inode)))
2555 return -ENODEV;
2556
2557 /*
2558 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
2559  * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
2560 */
2561 if (ff->open_flags & FOPEN_DIRECT_IO) {
2562 /*
2563 * Can't provide the coherency needed for MAP_SHARED
2564 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
2565 */
2566 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
2567 return -ENODEV;
2568
2569 invalidate_inode_pages2(file->f_mapping);
2570
2571 if (!(vma->vm_flags & VM_MAYSHARE)) {
2572 /* MAP_PRIVATE */
2573 return generic_file_mmap(file, vma);
2574 }
2575
2576 /*
2577 * First mmap of direct_io file enters caching inode io mode.
2578 * Also waits for parallel dio writers to go into serial mode
2579 * (exclusive instead of shared lock).
2580 * After first mmap, the inode stays in caching io mode until
2581 * the direct_io file release.
2582 */
2583 rc = fuse_file_cached_io_open(inode, ff);
2584 if (rc)
2585 return rc;
2586 }
2587
2588 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2589 fuse_link_write_file(file);
2590
2591 file_accessed(file);
2592 vma->vm_ops = &fuse_file_vm_ops;
2593 return 0;
2594 }
2595
2596 static int convert_fuse_file_lock(struct fuse_conn *fc,
2597 const struct fuse_file_lock *ffl,
2598 struct file_lock *fl)
2599 {
2600 switch (ffl->type) {
2601 case F_UNLCK:
2602 break;
2603
2604 case F_RDLCK:
2605 case F_WRLCK:
2606 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2607 ffl->end < ffl->start)
2608 return -EIO;
2609
2610 fl->fl_start = ffl->start;
2611 fl->fl_end = ffl->end;
2612
2613 /*
2614 * Convert pid into init's pid namespace. The locks API will
2615 * translate it into the caller's pid namespace.
2616 */
2617 rcu_read_lock();
2618 fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
2619 rcu_read_unlock();
2620 break;
2621
2622 default:
2623 return -EIO;
2624 }
2625 fl->c.flc_type = ffl->type;
2626 return 0;
2627 }
2628
2629 static void fuse_lk_fill(struct fuse_args *args, struct file *file,
2630 const struct file_lock *fl, int opcode, pid_t pid,
2631 int flock, struct fuse_lk_in *inarg)
2632 {
2633 struct inode *inode = file_inode(file);
2634 struct fuse_conn *fc = get_fuse_conn(inode);
2635 struct fuse_file *ff = file->private_data;
2636
2637 memset(inarg, 0, sizeof(*inarg));
2638 inarg->fh = ff->fh;
2639 inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
2640 inarg->lk.start = fl->fl_start;
2641 inarg->lk.end = fl->fl_end;
2642 inarg->lk.type = fl->c.flc_type;
2643 inarg->lk.pid = pid;
2644 if (flock)
2645 inarg->lk_flags |= FUSE_LK_FLOCK;
2646 args->opcode = opcode;
2647 args->nodeid = get_node_id(inode);
2648 args->in_numargs = 1;
2649 args->in_args[0].size = sizeof(*inarg);
2650 args->in_args[0].value = inarg;
2651 }
2652
2653 static int fuse_getlk(struct file *file, struct file_lock *fl)
2654 {
2655 struct inode *inode = file_inode(file);
2656 struct fuse_mount *fm = get_fuse_mount(inode);
2657 FUSE_ARGS(args);
2658 struct fuse_lk_in inarg;
2659 struct fuse_lk_out outarg;
2660 int err;
2661
2662 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
2663 args.out_numargs = 1;
2664 args.out_args[0].size = sizeof(outarg);
2665 args.out_args[0].value = &outarg;
2666 err = fuse_simple_request(fm, &args);
2667 if (!err)
2668 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
2669
2670 return err;
2671 }
2672
2673 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
2674 {
2675 struct inode *inode = file_inode(file);
2676 struct fuse_mount *fm = get_fuse_mount(inode);
2677 FUSE_ARGS(args);
2678 struct fuse_lk_in inarg;
2679 int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
2680 struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
2681 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
2682 int err;
2683
2684 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
2685 /* NLM needs asynchronous locks, which we don't support yet */
2686 return -ENOLCK;
2687 }
2688
2689 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
2690 err = fuse_simple_request(fm, &args);
2691
2692 /* locking is restartable */
2693 if (err == -EINTR)
2694 err = -ERESTARTSYS;
2695
2696 return err;
2697 }
2698
2699 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2700 {
2701 struct inode *inode = file_inode(file);
2702 struct fuse_conn *fc = get_fuse_conn(inode);
2703 int err;
2704
2705 if (cmd == F_CANCELLK) {
2706 err = 0;
2707 } else if (cmd == F_GETLK) {
2708 if (fc->no_lock) {
2709 posix_test_lock(file, fl);
2710 err = 0;
2711 } else
2712 err = fuse_getlk(file, fl);
2713 } else {
2714 if (fc->no_lock)
2715 err = posix_lock_file(file, fl, NULL);
2716 else
2717 err = fuse_setlk(file, fl, 0);
2718 }
2719 return err;
2720 }
2721
2722 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2723 {
2724 struct inode *inode = file_inode(file);
2725 struct fuse_conn *fc = get_fuse_conn(inode);
2726 int err;
2727
2728 if (fc->no_flock) {
2729 err = locks_lock_file_wait(file, fl);
2730 } else {
2731 struct fuse_file *ff = file->private_data;
2732
2733 /* emulate flock with POSIX locks */
2734 ff->flock = true;
2735 err = fuse_setlk(file, fl, 1);
2736 }
2737
2738 return err;
2739 }
2740
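/*
 * FUSE_BMAP only makes sense on block-device-backed (fuseblk) mounts;
 * return 0 (no mapping) for everything else and on any error.
 */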
2741 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2742 {
2743 struct inode *inode = mapping->host;
2744 struct fuse_mount *fm = get_fuse_mount(inode);
2745 FUSE_ARGS(args);
2746 struct fuse_bmap_in inarg;
2747 struct fuse_bmap_out outarg;
2748 int err;
2749
2750 if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
2751 return 0;
2752
2753 memset(&inarg, 0, sizeof(inarg));
2754 inarg.block = block;
2755 inarg.blocksize = inode->i_sb->s_blocksize;
2756 args.opcode = FUSE_BMAP;
2757 args.nodeid = get_node_id(inode);
2758 args.in_numargs = 1;
2759 args.in_args[0].size = sizeof(inarg);
2760 args.in_args[0].value = &inarg;
2761 args.out_numargs = 1;
2762 args.out_args[0].size = sizeof(outarg);
2763 args.out_args[0].value = &outarg;
2764 err = fuse_simple_request(fm, &args);
2765 if (err == -ENOSYS)
2766 fm->fc->no_bmap = 1;
2767
2768 return err ? 0 : outarg.block;
2769 }
2770
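/*
 * Forward SEEK_HOLE/SEEK_DATA to the server.  If the server doesn't
 * implement FUSE_LSEEK (-ENOSYS), remember that and fall back to generic
 * llseek after refreshing the file size.
 */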
2771 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2772 {
2773 struct inode *inode = file->f_mapping->host;
2774 struct fuse_mount *fm = get_fuse_mount(inode);
2775 struct fuse_file *ff = file->private_data;
2776 FUSE_ARGS(args);
2777 struct fuse_lseek_in inarg = {
2778 .fh = ff->fh,
2779 .offset = offset,
2780 .whence = whence
2781 };
2782 struct fuse_lseek_out outarg;
2783 int err;
2784
2785 if (fm->fc->no_lseek)
2786 goto fallback;
2787
2788 args.opcode = FUSE_LSEEK;
2789 args.nodeid = ff->nodeid;
2790 args.in_numargs = 1;
2791 args.in_args[0].size = sizeof(inarg);
2792 args.in_args[0].value = &inarg;
2793 args.out_numargs = 1;
2794 args.out_args[0].size = sizeof(outarg);
2795 args.out_args[0].value = &outarg;
2796 err = fuse_simple_request(fm, &args);
2797 if (err) {
2798 if (err == -ENOSYS) {
2799 fm->fc->no_lseek = 1;
2800 goto fallback;
2801 }
2802 return err;
2803 }
2804
2805 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2806
2807 fallback:
2808 err = fuse_update_attributes(inode, file, STATX_SIZE);
2809 if (!err)
2810 return generic_file_llseek(file, offset, whence);
2811 else
2812 return err;
2813 }
2814
2815 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
2816 {
2817 loff_t retval;
2818 struct inode *inode = file_inode(file);
2819
2820 switch (whence) {
2821 case SEEK_SET:
2822 case SEEK_CUR:
2823 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
2824 retval = generic_file_llseek(file, offset, whence);
2825 break;
2826 case SEEK_END:
2827 inode_lock(inode);
2828 retval = fuse_update_attributes(inode, file, STATX_SIZE);
2829 if (!retval)
2830 retval = generic_file_llseek(file, offset, whence);
2831 inode_unlock(inode);
2832 break;
2833 case SEEK_HOLE:
2834 case SEEK_DATA:
2835 inode_lock(inode);
2836 retval = fuse_lseek(file, offset, whence);
2837 inode_unlock(inode);
2838 break;
2839 default:
2840 retval = -EINVAL;
2841 }
2842
2843 return retval;
2844 }
2845
2846 /*
2847  * All files which have been polled are linked into the RB tree
2848  * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
2849 * find the matching one.
2850 */
2851 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2852 struct rb_node **parent_out)
2853 {
2854 struct rb_node **link = &fc->polled_files.rb_node;
2855 struct rb_node *last = NULL;
2856
2857 while (*link) {
2858 struct fuse_file *ff;
2859
2860 last = *link;
2861 ff = rb_entry(last, struct fuse_file, polled_node);
2862
2863 if (kh < ff->kh)
2864 link = &last->rb_left;
2865 else if (kh > ff->kh)
2866 link = &last->rb_right;
2867 else
2868 return link;
2869 }
2870
2871 if (parent_out)
2872 *parent_out = last;
2873 return link;
2874 }
2875
2876 /*
2877 * The file is about to be polled. Make sure it's on the polled_files
2878 * RB tree. Note that files once added to the polled_files tree are
2879 * not removed before the file is released. This is because a file
2880 * polled once is likely to be polled again.
2881 */
2882 static void fuse_register_polled_file(struct fuse_conn *fc,
2883 struct fuse_file *ff)
2884 {
2885 spin_lock(&fc->lock);
2886 if (RB_EMPTY_NODE(&ff->polled_node)) {
2887 struct rb_node **link, *parent;
2888
2889 link = fuse_find_polled_node(fc, ff->kh, &parent);
2890 BUG_ON(*link);
2891 rb_link_node(&ff->polled_node, parent, link);
2892 rb_insert_color(&ff->polled_node, &fc->polled_files);
2893 }
2894 spin_unlock(&fc->lock);
2895 }
2896
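/*
 * Query the server for the current poll state of the file.  If anyone is
 * actually waiting, also ask the server to send a FUSE_NOTIFY_POLL wakeup
 * and register the file so the notification can find it.
 */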
2897 __poll_t fuse_file_poll(struct file *file, poll_table *wait)
2898 {
2899 struct fuse_file *ff = file->private_data;
2900 struct fuse_mount *fm = ff->fm;
2901 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2902 struct fuse_poll_out outarg;
2903 FUSE_ARGS(args);
2904 int err;
2905
2906 if (fm->fc->no_poll)
2907 return DEFAULT_POLLMASK;
2908
2909 poll_wait(file, &ff->poll_wait, wait);
2910 inarg.events = mangle_poll(poll_requested_events(wait));
2911
2912 /*
2913 * Ask for notification iff there's someone waiting for it.
2914 * The client may ignore the flag and always notify.
2915 */
2916 if (waitqueue_active(&ff->poll_wait)) {
2917 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2918 fuse_register_polled_file(fm->fc, ff);
2919 }
2920
2921 args.opcode = FUSE_POLL;
2922 args.nodeid = ff->nodeid;
2923 args.in_numargs = 1;
2924 args.in_args[0].size = sizeof(inarg);
2925 args.in_args[0].value = &inarg;
2926 args.out_numargs = 1;
2927 args.out_args[0].size = sizeof(outarg);
2928 args.out_args[0].value = &outarg;
2929 err = fuse_simple_request(fm, &args);
2930
2931 if (!err)
2932 return demangle_poll(outarg.revents);
2933 if (err == -ENOSYS) {
2934 fm->fc->no_poll = 1;
2935 return DEFAULT_POLLMASK;
2936 }
2937 return EPOLLERR;
2938 }
2939 EXPORT_SYMBOL_GPL(fuse_file_poll);
2940
2941 /*
2942 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2943 * wakes up the poll waiters.
2944 */
2945 int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2946 struct fuse_notify_poll_wakeup_out *outarg)
2947 {
2948 u64 kh = outarg->kh;
2949 struct rb_node **link;
2950
2951 spin_lock(&fc->lock);
2952
2953 link = fuse_find_polled_node(fc, kh, NULL);
2954 if (*link) {
2955 struct fuse_file *ff;
2956
2957 ff = rb_entry(*link, struct fuse_file, polled_node);
2958 wake_up_interruptible_sync(&ff->poll_wait);
2959 }
2960
2961 spin_unlock(&fc->lock);
2962 return 0;
2963 }
2964
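/*
 * Trim the file back to its current in-core size; used after a failed
 * extending direct write to undo any partial size extension.
 */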
2965 static void fuse_do_truncate(struct file *file)
2966 {
2967 struct inode *inode = file->f_mapping->host;
2968 struct iattr attr;
2969
2970 attr.ia_valid = ATTR_SIZE;
2971 attr.ia_size = i_size_read(inode);
2972
2973 attr.ia_file = file;
2974 attr.ia_valid |= ATTR_FILE;
2975
2976 fuse_do_setattr(file_dentry(file), &attr, file);
2977 }
2978
2979 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
2980 {
2981 return round_up(off, fc->max_pages << PAGE_SHIFT);
2982 }
2983
2984 static ssize_t
2985 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2986 {
2987 DECLARE_COMPLETION_ONSTACK(wait);
2988 ssize_t ret = 0;
2989 struct file *file = iocb->ki_filp;
2990 struct fuse_file *ff = file->private_data;
2991 loff_t pos = 0;
2992 struct inode *inode;
2993 loff_t i_size;
2994 size_t count = iov_iter_count(iter), shortened = 0;
2995 loff_t offset = iocb->ki_pos;
2996 struct fuse_io_priv *io;
2997
2998 pos = offset;
2999 inode = file->f_mapping->host;
3000 i_size = i_size_read(inode);
3001
3002 if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
3003 return 0;
3004
3005 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
3006 if (!io)
3007 return -ENOMEM;
3008 spin_lock_init(&io->lock);
3009 kref_init(&io->refcnt);
3010 io->reqs = 1;
3011 io->bytes = -1;
3012 io->size = 0;
3013 io->offset = offset;
3014 io->write = (iov_iter_rw(iter) == WRITE);
3015 io->err = 0;
3016 /*
3017 * By default, we want to optimize all I/Os with async request
3018 * submission to the client filesystem if supported.
3019 */
3020 io->async = ff->fm->fc->async_dio;
3021 io->iocb = iocb;
3022 io->blocking = is_sync_kiocb(iocb);
3023
3024 	/*
	 * Optimization for short reads: don't ask the server for data that
	 * cannot exist past EOF (rounded up to the request granularity); the
	 * iterator is re-expanded afterwards, see iov_iter_reexpand() below.
	 */
3025 if (io->async && !io->write && offset + count > i_size) {
3026 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
3027 shortened = count - iov_iter_count(iter);
3028 count -= shortened;
3029 }
3030
3031 /*
3032 * We cannot asynchronously extend the size of a file.
3033  * In such a case the aio will behave exactly like sync io.
3034 */
3035 if ((offset + count > i_size) && io->write)
3036 io->blocking = true;
3037
3038 if (io->async && io->blocking) {
3039 /*
3040 * Additional reference to keep io around after
3041 * calling fuse_aio_complete()
3042 */
3043 kref_get(&io->refcnt);
3044 io->done = &wait;
3045 }
3046
3047 if (iov_iter_rw(iter) == WRITE) {
3048 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
3049 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
3050 } else {
3051 ret = __fuse_direct_read(io, iter, &pos);
3052 }
3053 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
3054
3055 if (io->async) {
3056 bool blocking = io->blocking;
3057
3058 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
3059
3060 /* we have a non-extending, async request, so return */
3061 if (!blocking)
3062 return -EIOCBQUEUED;
3063
3064 wait_for_completion(&wait);
3065 ret = fuse_get_res_by_io(io);
3066 }
3067
3068 kref_put(&io->refcnt, fuse_io_release);
3069
3070 if (iov_iter_rw(iter) == WRITE) {
3071 fuse_write_update_attr(inode, pos, ret);
3072 /* For extending writes we already hold exclusive lock */
3073 if (ret < 0 && offset + count > i_size)
3074 fuse_do_truncate(file);
3075 }
3076
3077 return ret;
3078 }
3079
3080 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
3081 {
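	/*
	 * Writes are deliberately flushed all the way to EOF rather than just
	 * to @end, so that in-flight extending writes are caught as well.
	 */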
3082 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
3083
3084 if (!err)
3085 fuse_sync_writes(inode);
3086
3087 return err;
3088 }
3089
3090 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3091 loff_t length)
3092 {
3093 struct fuse_file *ff = file->private_data;
3094 struct inode *inode = file_inode(file);
3095 struct fuse_inode *fi = get_fuse_inode(inode);
3096 struct fuse_mount *fm = ff->fm;
3097 FUSE_ARGS(args);
3098 struct fuse_fallocate_in inarg = {
3099 .fh = ff->fh,
3100 .offset = offset,
3101 .length = length,
3102 .mode = mode
3103 };
3104 int err;
3105 bool block_faults = FUSE_IS_DAX(inode) &&
3106 (!(mode & FALLOC_FL_KEEP_SIZE) ||
3107 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
3108
3109 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3110 FALLOC_FL_ZERO_RANGE))
3111 return -EOPNOTSUPP;
3112
3113 if (fm->fc->no_fallocate)
3114 return -EOPNOTSUPP;
3115
3116 inode_lock(inode);
3117 if (block_faults) {
3118 filemap_invalidate_lock(inode->i_mapping);
3119 err = fuse_dax_break_layouts(inode, 0, 0);
3120 if (err)
3121 goto out;
3122 }
3123
3124 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
3125 loff_t endbyte = offset + length - 1;
3126
3127 err = fuse_writeback_range(inode, offset, endbyte);
3128 if (err)
3129 goto out;
3130 }
3131
3132 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3133 offset + length > i_size_read(inode)) {
3134 err = inode_newsize_ok(inode, offset + length);
3135 if (err)
3136 goto out;
3137 }
3138
3139 err = file_modified(file);
3140 if (err)
3141 goto out;
3142
3143 if (!(mode & FALLOC_FL_KEEP_SIZE))
3144 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3145
3146 args.opcode = FUSE_FALLOCATE;
3147 args.nodeid = ff->nodeid;
3148 args.in_numargs = 1;
3149 args.in_args[0].size = sizeof(inarg);
3150 args.in_args[0].value = &inarg;
3151 err = fuse_simple_request(fm, &args);
3152 if (err == -ENOSYS) {
3153 fm->fc->no_fallocate = 1;
3154 err = -EOPNOTSUPP;
3155 }
3156 if (err)
3157 goto out;
3158
3159 /* we could have extended the file */
3160 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3161 if (fuse_write_update_attr(inode, offset + length, length))
3162 file_update_time(file);
3163 }
3164
3165 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
3166 truncate_pagecache_range(inode, offset, offset + length - 1);
3167
3168 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
3169
3170 out:
3171 if (!(mode & FALLOC_FL_KEEP_SIZE))
3172 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3173
3174 if (block_faults)
3175 filemap_invalidate_unlock(inode->i_mapping);
3176
3177 inode_unlock(inode);
3178
3179 fuse_flush_time_update(inode);
3180
3181 return err;
3182 }
3183
3184 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3185 struct file *file_out, loff_t pos_out,
3186 size_t len, unsigned int flags)
3187 {
3188 struct fuse_file *ff_in = file_in->private_data;
3189 struct fuse_file *ff_out = file_out->private_data;
3190 struct inode *inode_in = file_inode(file_in);
3191 struct inode *inode_out = file_inode(file_out);
3192 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
3193 struct fuse_mount *fm = ff_in->fm;
3194 struct fuse_conn *fc = fm->fc;
3195 FUSE_ARGS(args);
3196 struct fuse_copy_file_range_in inarg = {
3197 .fh_in = ff_in->fh,
3198 .off_in = pos_in,
3199 .nodeid_out = ff_out->nodeid,
3200 .fh_out = ff_out->fh,
3201 .off_out = pos_out,
3202 .len = len,
3203 .flags = flags
3204 };
3205 struct fuse_write_out outarg;
3206 ssize_t err;
3207 /* mark unstable when write-back is not used, and file_out gets
3208 * extended */
3209 bool is_unstable = (!fc->writeback_cache) &&
3210 ((pos_out + len) > inode_out->i_size);
3211
3212 if (fc->no_copy_file_range)
3213 return -EOPNOTSUPP;
3214
3215 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
3216 return -EXDEV;
3217
3218 inode_lock(inode_in);
3219 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
3220 inode_unlock(inode_in);
3221 if (err)
3222 return err;
3223
3224 inode_lock(inode_out);
3225
3226 err = file_modified(file_out);
3227 if (err)
3228 goto out;
3229
3230 /*
3231 * Write out dirty pages in the destination file before sending the COPY
3232 * request to userspace. After the request is completed, truncate off
3233 * pages (including partial ones) from the cache that have been copied,
3234 * since these contain stale data at that point.
3235 *
3236 * This should be mostly correct, but if the COPY writes to partial
3237 * pages (at the start or end) and the parts not covered by the COPY are
3238 * written through a memory map after calling fuse_writeback_range(),
3239 * then these partial page modifications will be lost on truncation.
3240 *
3241 * It is unlikely that someone would rely on such mixed style
3242 * modifications. Yet this does give less guarantees than if the
3243 * copying was performed with write(2).
3244 *
3245 * To fix this a mapping->invalidate_lock could be used to prevent new
3246 * faults while the copy is ongoing.
3247 */
3248 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3249 if (err)
3250 goto out;
3251
3252 if (is_unstable)
3253 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3254
3255 args.opcode = FUSE_COPY_FILE_RANGE;
3256 args.nodeid = ff_in->nodeid;
3257 args.in_numargs = 1;
3258 args.in_args[0].size = sizeof(inarg);
3259 args.in_args[0].value = &inarg;
3260 args.out_numargs = 1;
3261 args.out_args[0].size = sizeof(outarg);
3262 args.out_args[0].value = &outarg;
3263 err = fuse_simple_request(fm, &args);
3264 if (err == -ENOSYS) {
3265 fc->no_copy_file_range = 1;
3266 err = -EOPNOTSUPP;
3267 }
3268 if (err)
3269 goto out;
3270
3271 truncate_inode_pages_range(inode_out->i_mapping,
3272 ALIGN_DOWN(pos_out, PAGE_SIZE),
3273 ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
3274
3275 file_update_time(file_out);
3276 fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
3277
3278 err = outarg.size;
3279 out:
3280 if (is_unstable)
3281 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3282
3283 inode_unlock(inode_out);
3284 file_accessed(file_in);
3285
3286 fuse_flush_time_update(inode_out);
3287
3288 return err;
3289 }
3290
3291 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3292 struct file *dst_file, loff_t dst_off,
3293 size_t len, unsigned int flags)
3294 {
3295 ssize_t ret;
3296
3297 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3298 len, flags);
3299
3300 if (ret == -EOPNOTSUPP || ret == -EXDEV)
3301 ret = splice_copy_file_range(src_file, src_off, dst_file,
3302 dst_off, len);
3303 return ret;
3304 }
3305
3306 static const struct file_operations fuse_file_operations = {
3307 .llseek = fuse_file_llseek,
3308 .read_iter = fuse_file_read_iter,
3309 .write_iter = fuse_file_write_iter,
3310 .mmap = fuse_file_mmap,
3311 .open = fuse_open,
3312 .flush = fuse_flush,
3313 .release = fuse_release,
3314 .fsync = fuse_fsync,
3315 .lock = fuse_file_lock,
3316 .get_unmapped_area = thp_get_unmapped_area,
3317 .flock = fuse_file_flock,
3318 .splice_read = fuse_splice_read,
3319 .splice_write = fuse_splice_write,
3320 .unlocked_ioctl = fuse_file_ioctl,
3321 .compat_ioctl = fuse_file_compat_ioctl,
3322 .poll = fuse_file_poll,
3323 .fallocate = fuse_file_fallocate,
3324 .copy_file_range = fuse_copy_file_range,
3325 };
3326
3327 static const struct address_space_operations fuse_file_aops = {
3328 .read_folio = fuse_read_folio,
3329 .readahead = fuse_readahead,
3330 .writepages = fuse_writepages,
3331 .launder_folio = fuse_launder_folio,
3332 .dirty_folio = filemap_dirty_folio,
3333 .migrate_folio = filemap_migrate_folio,
3334 .bmap = fuse_bmap,
3335 .direct_IO = fuse_direct_IO,
3336 .write_begin = fuse_write_begin,
3337 .write_end = fuse_write_end,
3338 };
3339
3340 void fuse_init_file_inode(struct inode *inode, unsigned int flags)
3341 {
3342 struct fuse_inode *fi = get_fuse_inode(inode);
3343
3344 inode->i_fop = &fuse_file_operations;
3345 inode->i_data.a_ops = &fuse_file_aops;
3346
3347 INIT_LIST_HEAD(&fi->write_files);
3348 INIT_LIST_HEAD(&fi->queued_writes);
3349 fi->writectr = 0;
3350 fi->iocachectr = 0;
3351 init_waitqueue_head(&fi->page_waitq);
3352 init_waitqueue_head(&fi->direct_io_waitq);
3353 fi->writepages = RB_ROOT;
3354
3355 if (IS_ENABLED(CONFIG_FUSE_DAX))
3356 fuse_dax_inode_init(inode, flags);
3357 }
3358