1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/sched/signal.h>
16 #include <linux/module.h>
17 #include <linux/swap.h>
18 #include <linux/falloc.h>
19 #include <linux/uio.h>
20 #include <linux/fs.h>
21 #include <linux/filelock.h>
22 #include <linux/splice.h>
23 #include <linux/task_io_accounting_ops.h>
24 #include <linux/iomap.h>
25
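/*
 * Send FUSE_OPEN or FUSE_OPENDIR for @nodeid and return the server's reply
 * in @outargp.  O_CREAT, O_EXCL and O_NOCTTY have already been handled by
 * the VFS and are masked out; O_TRUNC is also masked out unless the server
 * handles truncation atomically.
 */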
26 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
27 unsigned int open_flags, int opcode,
28 struct fuse_open_out *outargp)
29 {
30 struct fuse_open_in inarg;
31 FUSE_ARGS(args);
32
33 memset(&inarg, 0, sizeof(inarg));
34 inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
35 if (!fm->fc->atomic_o_trunc)
36 inarg.flags &= ~O_TRUNC;
37
38 if (fm->fc->handle_killpriv_v2 &&
39 (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
40 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
41 }
42
43 args.opcode = opcode;
44 args.nodeid = nodeid;
45 args.in_numargs = 1;
46 args.in_args[0].size = sizeof(inarg);
47 args.in_args[0].value = &inarg;
48 args.out_numargs = 1;
49 args.out_args[0].size = sizeof(*outargp);
50 args.out_args[0].value = outargp;
51
52 return fuse_simple_request(fm, &args);
53 }
54
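/*
 * Allocate a fuse_file.  If @release is true, ff->args is allocated as well;
 * it first holds the FUSE_OPEN reply and is later reused for the
 * FUSE_RELEASE request.
 */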
55 struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
56 {
57 struct fuse_file *ff;
58
59 ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
60 if (unlikely(!ff))
61 return NULL;
62
63 ff->fm = fm;
64 if (release) {
65 ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT);
66 if (!ff->args) {
67 kfree(ff);
68 return NULL;
69 }
70 }
71
72 INIT_LIST_HEAD(&ff->write_entry);
73 refcount_set(&ff->count, 1);
74 RB_CLEAR_NODE(&ff->polled_node);
75 init_waitqueue_head(&ff->poll_wait);
76
77 ff->kh = atomic64_inc_return(&fm->fc->khctr);
78
79 return ff;
80 }
81
82 void fuse_file_free(struct fuse_file *ff)
83 {
84 kfree(ff->args);
85 kfree(ff);
86 }
87
88 static struct fuse_file *fuse_file_get(struct fuse_file *ff)
89 {
90 refcount_inc(&ff->count);
91 return ff;
92 }
93
94 static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
95 int error)
96 {
97 struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
98
99 iput(ra->inode);
100 kfree(ra);
101 }
102
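/*
 * Drop a reference to the file.  On the final put, send the release request
 * prepared by fuse_prepare_release() (if any): synchronously when @sync is
 * set, otherwise in the background.
 */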
103 static void fuse_file_put(struct fuse_file *ff, bool sync)
104 {
105 if (refcount_dec_and_test(&ff->count)) {
106 struct fuse_release_args *ra = &ff->args->release_args;
107 struct fuse_args *args = (ra ? &ra->args : NULL);
108
109 if (ra && ra->inode)
110 fuse_file_io_release(ff, ra->inode);
111
112 if (!args) {
113 /* Do nothing when server does not implement 'open' */
114 } else if (sync) {
115 fuse_simple_request(ff->fm, args);
116 fuse_release_end(ff->fm, args, 0);
117 } else {
118 args->end = fuse_release_end;
119 if (fuse_simple_background(ff->fm, args,
120 GFP_KERNEL | __GFP_NOFAIL))
121 fuse_release_end(ff->fm, args, -ENOTCONN);
122 }
123 kfree(ff);
124 }
125 }
126
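/*
 * Allocate a fuse_file and send FUSE_OPEN/FUSE_OPENDIR for it.  If the
 * server does not implement the opcode (-ENOSYS), fall back to default open
 * flags and remember not to send the request again on this connection.
 */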
127 struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
128 unsigned int open_flags, bool isdir)
129 {
130 struct fuse_conn *fc = fm->fc;
131 struct fuse_file *ff;
132 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
133 bool open = isdir ? !fc->no_opendir : !fc->no_open;
134
135 ff = fuse_file_alloc(fm, open);
136 if (!ff)
137 return ERR_PTR(-ENOMEM);
138
139 ff->fh = 0;
140 /* Default for no-open */
141 ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
142 if (open) {
143 /* Store outarg for fuse_finish_open() */
144 struct fuse_open_out *outargp = &ff->args->open_outarg;
145 int err;
146
147 err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
148 if (!err) {
149 ff->fh = outargp->fh;
150 ff->open_flags = outargp->open_flags;
151 } else if (err != -ENOSYS) {
152 fuse_file_free(ff);
153 return ERR_PTR(err);
154 } else {
155 /* No release needed */
156 kfree(ff->args);
157 ff->args = NULL;
158 if (isdir)
159 fc->no_opendir = 1;
160 else
161 fc->no_open = 1;
162 }
163 }
164
165 if (isdir)
166 ff->open_flags &= ~FOPEN_DIRECT_IO;
167
168 ff->nodeid = nodeid;
169
170 return ff;
171 }
172
173 int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
174 bool isdir)
175 {
176 struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);
177
178 if (!IS_ERR(ff))
179 file->private_data = ff;
180
181 return PTR_ERR_OR_ZERO(ff);
182 }
183 EXPORT_SYMBOL_GPL(fuse_do_open);
184
185 static void fuse_link_write_file(struct file *file)
186 {
187 struct inode *inode = file_inode(file);
188 struct fuse_inode *fi = get_fuse_inode(inode);
189 struct fuse_file *ff = file->private_data;
190 /*
191 * file may be written through mmap, so chain it onto the
192 * inode's write_files list
193 */
194 spin_lock(&fi->lock);
195 if (list_empty(&ff->write_entry))
196 list_add(&ff->write_entry, &fi->write_files);
197 spin_unlock(&fi->lock);
198 }
199
200 int fuse_finish_open(struct inode *inode, struct file *file)
201 {
202 struct fuse_file *ff = file->private_data;
203 struct fuse_conn *fc = get_fuse_conn(inode);
204 int err;
205
206 err = fuse_file_io_open(file, inode);
207 if (err)
208 return err;
209
210 if (ff->open_flags & FOPEN_STREAM)
211 stream_open(inode, file);
212 else if (ff->open_flags & FOPEN_NONSEEKABLE)
213 nonseekable_open(inode, file);
214
215 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
216 fuse_link_write_file(file);
217
218 return 0;
219 }
220
221 static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
222 {
223 struct fuse_conn *fc = get_fuse_conn(inode);
224 struct fuse_inode *fi = get_fuse_inode(inode);
225
226 spin_lock(&fi->lock);
227 fi->attr_version = atomic64_inc_return(&fc->attr_version);
228 i_size_write(inode, 0);
229 spin_unlock(&fi->lock);
230 file_update_time(file);
231 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
232 }
233
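/*
 * ->open() for regular files.  For atomic O_TRUNC on a writeback-cache or
 * DAX inode, the inode lock is held and writes are quiesced around the open;
 * the page cache is truncated once the open succeeds.
 */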
234 static int fuse_open(struct inode *inode, struct file *file)
235 {
236 struct fuse_mount *fm = get_fuse_mount(inode);
237 struct fuse_inode *fi = get_fuse_inode(inode);
238 struct fuse_conn *fc = fm->fc;
239 struct fuse_file *ff;
240 int err;
241 bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
242 bool is_wb_truncate = is_truncate && fc->writeback_cache;
243 bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);
244
245 if (fuse_is_bad(inode))
246 return -EIO;
247
248 err = generic_file_open(inode, file);
249 if (err)
250 return err;
251
252 if (is_wb_truncate || dax_truncate)
253 inode_lock(inode);
254
255 if (dax_truncate) {
256 filemap_invalidate_lock(inode->i_mapping);
257 err = fuse_dax_break_layouts(inode, 0, -1);
258 if (err)
259 goto out_inode_unlock;
260 }
261
262 if (is_wb_truncate || dax_truncate)
263 fuse_set_nowrite(inode);
264
265 err = fuse_do_open(fm, get_node_id(inode), file, false);
266 if (!err) {
267 ff = file->private_data;
268 err = fuse_finish_open(inode, file);
269 if (err)
270 fuse_sync_release(fi, ff, file->f_flags);
271 else if (is_truncate)
272 fuse_truncate_update_attr(inode, file);
273 }
274
275 if (is_wb_truncate || dax_truncate)
276 fuse_release_nowrite(inode);
277 if (!err) {
278 if (is_truncate)
279 truncate_pagecache(inode, 0);
280 else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
281 invalidate_inode_pages2(inode->i_mapping);
282 }
283 if (dax_truncate)
284 filemap_invalidate_unlock(inode->i_mapping);
285 out_inode_unlock:
286 if (is_wb_truncate || dax_truncate)
287 inode_unlock(inode);
288
289 return err;
290 }
291
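/*
 * Detach @ff from the inode's write_files list and the connection's polled
 * files and fill ff->args with the RELEASE request.  The request itself is
 * sent later, from fuse_file_put().
 */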
292 static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
293 unsigned int flags, int opcode, bool sync)
294 {
295 struct fuse_conn *fc = ff->fm->fc;
296 struct fuse_release_args *ra = &ff->args->release_args;
297
298 if (fuse_file_passthrough(ff))
299 fuse_passthrough_release(ff, fuse_inode_backing(fi));
300
301 /* Inode is NULL on error path of fuse_create_open() */
302 if (likely(fi)) {
303 spin_lock(&fi->lock);
304 list_del(&ff->write_entry);
305 spin_unlock(&fi->lock);
306 }
307 spin_lock(&fc->lock);
308 if (!RB_EMPTY_NODE(&ff->polled_node))
309 rb_erase(&ff->polled_node, &fc->polled_files);
310 spin_unlock(&fc->lock);
311
312 wake_up_interruptible_all(&ff->poll_wait);
313
314 if (!ra)
315 return;
316
317 /* ff->args was used for open outarg */
318 memset(ff->args, 0, sizeof(*ff->args));
319 ra->inarg.fh = ff->fh;
320 ra->inarg.flags = flags;
321 ra->args.in_numargs = 1;
322 ra->args.in_args[0].size = sizeof(struct fuse_release_in);
323 ra->args.in_args[0].value = &ra->inarg;
324 ra->args.opcode = opcode;
325 ra->args.nodeid = ff->nodeid;
326 ra->args.force = true;
327 ra->args.nocreds = true;
328
329 /*
330 * Hold inode until release is finished.
331 * From fuse_sync_release() the refcount is 1 and everything's
332 * synchronous, so we are fine with not doing igrab() here.
333 */
334 ra->inode = sync ? NULL : igrab(&fi->inode);
335 }
336
337 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
338 unsigned int open_flags, fl_owner_t id, bool isdir)
339 {
340 struct fuse_inode *fi = get_fuse_inode(inode);
341 struct fuse_release_args *ra = &ff->args->release_args;
342 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
343
344 fuse_prepare_release(fi, ff, open_flags, opcode, false);
345
346 if (ra && ff->flock) {
347 ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
348 ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
349 }
350
351 /*
352 * Normally this will send the RELEASE request, however if
353 * some asynchronous READ or WRITE requests are outstanding,
354 * the sending will be delayed.
355 *
356 * Make the release synchronous if this is a fuseblk mount;
357 * synchronous RELEASE is allowed (and desirable) in this case
358 * because the server can be trusted not to screw up.
359 */
360 fuse_file_put(ff, ff->fm->fc->destroy);
361 }
362
363 void fuse_release_common(struct file *file, bool isdir)
364 {
365 fuse_file_release(file_inode(file), file->private_data, file->f_flags,
366 (fl_owner_t) file, isdir);
367 }
368
369 static int fuse_release(struct inode *inode, struct file *file)
370 {
371 struct fuse_conn *fc = get_fuse_conn(inode);
372
373 /*
374 * Dirty pages might remain despite write_inode_now() call from
375 * fuse_flush() due to writes racing with the close.
376 */
377 if (fc->writeback_cache)
378 write_inode_now(inode, 1);
379
380 fuse_release_common(file, false);
381
382 /* return value is ignored by VFS */
383 return 0;
384 }
385
386 void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
387 unsigned int flags)
388 {
389 WARN_ON(refcount_read(&ff->count) > 1);
390 fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
391 fuse_file_put(ff, true);
392 }
393 EXPORT_SYMBOL_GPL(fuse_sync_release);
394
395 /*
396 * Scramble the ID space with XTEA, so that the value of the files_struct
397 * pointer is not exposed to userspace.
398 */
399 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
400 {
401 u32 *k = fc->scramble_key;
402 u64 v = (unsigned long) id;
403 u32 v0 = v;
404 u32 v1 = v >> 32;
405 u32 sum = 0;
406 int i;
407
408 for (i = 0; i < 32; i++) {
409 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
410 sum += 0x9E3779B9;
411 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
412 }
413
414 return (u64) v0 + ((u64) v1 << 32);
415 }
416
417 struct fuse_writepage_args {
418 struct fuse_io_args ia;
419 struct list_head queue_entry;
420 struct inode *inode;
421 struct fuse_sync_bucket *bucket;
422 };
423
424 /*
425 * Wait for all pending writepages on the inode to finish.
426 *
427 * This is currently done by blocking further writes with FUSE_NOWRITE
428 * and waiting for all sent writes to complete.
429 *
430 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
431 * could conflict with truncation.
432 */
433 static void fuse_sync_writes(struct inode *inode)
434 {
435 fuse_set_nowrite(inode);
436 fuse_release_nowrite(inode);
437 }
438
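/*
 * ->flush() for regular files: skip everything if FOPEN_NOFLUSH was
 * requested (and there is no writeback cache), otherwise write back dirty
 * data and send FUSE_FLUSH unless the server does not implement it.
 */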
439 static int fuse_flush(struct file *file, fl_owner_t id)
440 {
441 struct inode *inode = file_inode(file);
442 struct fuse_mount *fm = get_fuse_mount(inode);
443 struct fuse_file *ff = file->private_data;
444 struct fuse_flush_in inarg;
445 FUSE_ARGS(args);
446 int err;
447
448 if (fuse_is_bad(inode))
449 return -EIO;
450
451 if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
452 return 0;
453
454 err = write_inode_now(inode, 1);
455 if (err)
456 return err;
457
458 err = filemap_check_errors(file->f_mapping);
459 if (err)
460 return err;
461
462 err = 0;
463 if (fm->fc->no_flush)
464 goto inval_attr_out;
465
466 memset(&inarg, 0, sizeof(inarg));
467 inarg.fh = ff->fh;
468 inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
469 args.opcode = FUSE_FLUSH;
470 args.nodeid = get_node_id(inode);
471 args.in_numargs = 1;
472 args.in_args[0].size = sizeof(inarg);
473 args.in_args[0].value = &inarg;
474 args.force = true;
475
476 err = fuse_simple_request(fm, &args);
477 if (err == -ENOSYS) {
478 fm->fc->no_flush = 1;
479 err = 0;
480 }
481
482 inval_attr_out:
483 /*
484 * In-memory i_blocks is not maintained by fuse; if writeback cache is
485 * enabled, i_blocks from the cached attr may not be accurate.
486 */
487 if (!err && fm->fc->writeback_cache)
488 fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
489 return err;
490 }
491
492 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
493 int datasync, int opcode)
494 {
495 struct inode *inode = file->f_mapping->host;
496 struct fuse_mount *fm = get_fuse_mount(inode);
497 struct fuse_file *ff = file->private_data;
498 FUSE_ARGS(args);
499 struct fuse_fsync_in inarg;
500
501 memset(&inarg, 0, sizeof(inarg));
502 inarg.fh = ff->fh;
503 inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
504 args.opcode = opcode;
505 args.nodeid = get_node_id(inode);
506 args.in_numargs = 1;
507 args.in_args[0].size = sizeof(inarg);
508 args.in_args[0].value = &inarg;
509 return fuse_simple_request(fm, &args);
510 }
511
512 static int fuse_fsync(struct file *file, loff_t start, loff_t end,
513 int datasync)
514 {
515 struct inode *inode = file->f_mapping->host;
516 struct fuse_conn *fc = get_fuse_conn(inode);
517 int err;
518
519 if (fuse_is_bad(inode))
520 return -EIO;
521
522 inode_lock(inode);
523
524 /*
525 * Start writeback against all dirty pages of the inode, then
526 * wait for all outstanding writes, before sending the FSYNC
527 * request.
528 */
529 err = file_write_and_wait_range(file, start, end);
530 if (err)
531 goto out;
532
533 fuse_sync_writes(inode);
534
535 /*
536 * Due to the implementation of fuse writeback,
537 * file_write_and_wait_range() does not catch errors.
538 * We have to do this directly after fuse_sync_writes().
539 */
540 err = file_check_and_advance_wb_err(file);
541 if (err)
542 goto out;
543
544 err = sync_inode_metadata(inode, 1);
545 if (err)
546 goto out;
547
548 if (fc->no_fsync)
549 goto out;
550
551 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
552 if (err == -ENOSYS) {
553 fc->no_fsync = 1;
554 err = 0;
555 }
556 out:
557 inode_unlock(inode);
558
559 return err;
560 }
561
562 void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
563 size_t count, int opcode)
564 {
565 struct fuse_file *ff = file->private_data;
566 struct fuse_args *args = &ia->ap.args;
567
568 ia->read.in.fh = ff->fh;
569 ia->read.in.offset = pos;
570 ia->read.in.size = count;
571 ia->read.in.flags = file->f_flags;
572 args->opcode = opcode;
573 args->nodeid = ff->nodeid;
574 args->in_numargs = 1;
575 args->in_args[0].size = sizeof(ia->read.in);
576 args->in_args[0].value = &ia->read.in;
577 args->out_argvar = true;
578 args->out_numargs = 1;
579 args->out_args[0].size = count;
580 }
581
582 static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
583 bool should_dirty)
584 {
585 unsigned int i;
586
587 for (i = 0; i < ap->num_folios; i++) {
588 if (should_dirty)
589 folio_mark_dirty_lock(ap->folios[i]);
590 if (ap->args.is_pinned)
591 unpin_folio(ap->folios[i]);
592 }
593
594 if (nres > 0 && ap->args.invalidate_vmap)
595 invalidate_kernel_vmap_range(ap->args.vmap_base, nres);
596 }
597
598 static void fuse_io_release(struct kref *kref)
599 {
600 kfree(container_of(kref, struct fuse_io_priv, refcnt));
601 }
602
603 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
604 {
605 if (io->err)
606 return io->err;
607
608 if (io->bytes >= 0 && io->write)
609 return -EIO;
610
611 return io->bytes < 0 ? io->size : io->bytes;
612 }
613
614 /*
615 * In case of a short read, the caller sets 'pos' to the position of the
616 * actual end of the fuse request within the IO request. Otherwise, if
617 * bytes_requested == bytes_transferred or rw == WRITE, 'pos' is set to -1.
618 *
619 * An example:
620 * User requested DIO read of 64K. It was split into two 32K fuse requests,
621 * both submitted asynchronously. The first of them was ACKed by userspace as
622 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
623 * second request was ACKed as short, e.g. only 1K was read, resulting in
624 * pos == 33K.
625 *
626 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
627 * will be equal to the length of the longest contiguous fragment of
628 * transferred data starting from the beginning of IO request.
629 */
630 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
631 {
632 int left;
633
634 spin_lock(&io->lock);
635 if (err)
636 io->err = io->err ? : err;
637 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
638 io->bytes = pos;
639
640 left = --io->reqs;
641 if (!left && io->blocking)
642 complete(io->done);
643 spin_unlock(&io->lock);
644
645 if (!left && !io->blocking) {
646 ssize_t res = fuse_get_res_by_io(io);
647
648 if (res >= 0) {
649 struct inode *inode = file_inode(io->iocb->ki_filp);
650 struct fuse_conn *fc = get_fuse_conn(inode);
651 struct fuse_inode *fi = get_fuse_inode(inode);
652
653 spin_lock(&fi->lock);
654 fi->attr_version = atomic64_inc_return(&fc->attr_version);
655 spin_unlock(&fi->lock);
656 }
657
658 io->iocb->ki_complete(io->iocb, res);
659 }
660
661 kref_put(&io->refcnt, fuse_io_release);
662 }
663
664 static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
665 unsigned int nfolios)
666 {
667 struct fuse_io_args *ia;
668
669 ia = kzalloc(sizeof(*ia), GFP_KERNEL);
670 if (ia) {
671 ia->io = io;
672 ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,
673 &ia->ap.descs);
674 if (!ia->ap.folios) {
675 kfree(ia);
676 ia = NULL;
677 }
678 }
679 return ia;
680 }
681
682 static void fuse_io_free(struct fuse_io_args *ia)
683 {
684 kfree(ia->ap.folios);
685 kfree(ia);
686 }
687
688 static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
689 int err)
690 {
691 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
692 struct fuse_io_priv *io = ia->io;
693 ssize_t pos = -1;
694 size_t nres;
695
696 if (err) {
697 /* Nothing */
698 } else if (io->write) {
699 if (ia->write.out.size > ia->write.in.size) {
700 err = -EIO;
701 } else {
702 nres = ia->write.out.size;
703 if (ia->write.in.size != ia->write.out.size)
704 pos = ia->write.in.offset - io->offset +
705 ia->write.out.size;
706 }
707 } else {
708 u32 outsize = args->out_args[0].size;
709
710 nres = outsize;
711 if (ia->read.in.size != outsize)
712 pos = ia->read.in.offset - io->offset + outsize;
713 }
714
715 fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);
716
717 fuse_aio_complete(io, err, pos);
718 fuse_io_free(ia);
719 }
720
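/*
 * Account @num_bytes on the fuse_io_priv and submit the request in the
 * background; fuse_aio_complete_req() finishes the accounting when the
 * reply arrives.
 */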
721 static ssize_t fuse_async_req_send(struct fuse_mount *fm,
722 struct fuse_io_args *ia, size_t num_bytes)
723 {
724 ssize_t err;
725 struct fuse_io_priv *io = ia->io;
726
727 spin_lock(&io->lock);
728 kref_get(&io->refcnt);
729 io->size += num_bytes;
730 io->reqs++;
731 spin_unlock(&io->lock);
732
733 ia->ap.args.end = fuse_aio_complete_req;
734 ia->ap.args.may_block = io->should_dirty;
735 err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
736 if (err)
737 fuse_aio_complete_req(fm, &ia->ap.args, err);
738
739 return num_bytes;
740 }
741
742 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
743 fl_owner_t owner)
744 {
745 struct file *file = ia->io->iocb->ki_filp;
746 struct fuse_file *ff = file->private_data;
747 struct fuse_mount *fm = ff->fm;
748
749 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
750 if (owner != NULL) {
751 ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
752 ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
753 }
754
755 if (ia->io->async)
756 return fuse_async_req_send(fm, ia, count);
757
758 return fuse_simple_request(fm, &ia->ap.args);
759 }
760
761 static void fuse_read_update_size(struct inode *inode, loff_t size,
762 u64 attr_ver)
763 {
764 struct fuse_conn *fc = get_fuse_conn(inode);
765 struct fuse_inode *fi = get_fuse_inode(inode);
766
767 spin_lock(&fi->lock);
768 if (attr_ver >= fi->attr_version && size < inode->i_size &&
769 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
770 fi->attr_version = atomic64_inc_return(&fc->attr_version);
771 i_size_write(inode, size);
772 }
773 spin_unlock(&fi->lock);
774 }
775
776 static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
777 struct fuse_args_pages *ap)
778 {
779 struct fuse_conn *fc = get_fuse_conn(inode);
780
781 /*
782 * If writeback_cache is enabled, a short read means there's a hole in
783 * the file. Some data after the hole is in page cache, but has not
784 * reached the client fs yet. So the hole is not present there.
785 */
786 if (!fc->writeback_cache) {
787 loff_t pos = folio_pos(ap->folios[0]) + num_read;
788 fuse_read_update_size(inode, pos, attr_ver);
789 }
790 }
791
792 static int fuse_do_readfolio(struct file *file, struct folio *folio,
793 size_t off, size_t len)
794 {
795 struct inode *inode = folio->mapping->host;
796 struct fuse_mount *fm = get_fuse_mount(inode);
797 loff_t pos = folio_pos(folio) + off;
798 struct fuse_folio_desc desc = {
799 .offset = off,
800 .length = len,
801 };
802 struct fuse_io_args ia = {
803 .ap.args.page_zeroing = true,
804 .ap.args.out_pages = true,
805 .ap.num_folios = 1,
806 .ap.folios = &folio,
807 .ap.descs = &desc,
808 };
809 ssize_t res;
810 u64 attr_ver;
811
812 attr_ver = fuse_get_attr_version(fm->fc);
813
814 /* Don't overflow end offset */
815 if (pos + (desc.length - 1) == LLONG_MAX)
816 desc.length--;
817
818 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
819 res = fuse_simple_request(fm, &ia.ap.args);
820 if (res < 0)
821 return res;
822 /*
823 * Short read means EOF. If file size is larger, truncate it
824 */
825 if (res < desc.length)
826 fuse_short_read(inode, attr_ver, res, &ia.ap);
827
828 return 0;
829 }
830
831 static int fuse_read_folio(struct file *file, struct folio *folio)
832 {
833 struct inode *inode = folio->mapping->host;
834 int err;
835
836 err = -EIO;
837 if (fuse_is_bad(inode))
838 goto out;
839
840 err = fuse_do_readfolio(file, folio, 0, folio_size(folio));
841 if (!err)
842 folio_mark_uptodate(folio);
843
844 fuse_invalidate_atime(inode);
845 out:
846 folio_unlock(folio);
847 return err;
848 }
849
850 static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
851 struct folio *folio, loff_t pos,
852 size_t len)
853 {
854 struct file *file = iter->private;
855 size_t off = offset_in_folio(folio, pos);
856
857 return fuse_do_readfolio(file, folio, off, len);
858 }
859
860 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
861 int err)
862 {
863 int i;
864 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
865 struct fuse_args_pages *ap = &ia->ap;
866 size_t count = ia->read.in.size;
867 size_t num_read = args->out_args[0].size;
868 struct address_space *mapping = NULL;
869
870 for (i = 0; mapping == NULL && i < ap->num_folios; i++)
871 mapping = ap->folios[i]->mapping;
872
873 if (mapping) {
874 struct inode *inode = mapping->host;
875
876 /*
877 * Short read means EOF. If file size is larger, truncate it
878 */
879 if (!err && num_read < count)
880 fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
881
882 fuse_invalidate_atime(inode);
883 }
884
885 for (i = 0; i < ap->num_folios; i++) {
886 folio_end_read(ap->folios[i], !err);
887 folio_put(ap->folios[i]);
888 }
889 if (ia->ff)
890 fuse_file_put(ia->ff, false);
891
892 fuse_io_free(ia);
893 }
894
895 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
896 unsigned int count)
897 {
898 struct fuse_file *ff = file->private_data;
899 struct fuse_mount *fm = ff->fm;
900 struct fuse_args_pages *ap = &ia->ap;
901 loff_t pos = folio_pos(ap->folios[0]);
902 ssize_t res;
903 int err;
904
905 ap->args.out_pages = true;
906 ap->args.page_zeroing = true;
907 ap->args.page_replace = true;
908
909 /* Don't overflow end offset */
910 if (pos + (count - 1) == LLONG_MAX) {
911 count--;
912 ap->descs[ap->num_folios - 1].length--;
913 }
914 WARN_ON((loff_t) (pos + count) < 0);
915
916 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
917 ia->read.attr_ver = fuse_get_attr_version(fm->fc);
918 if (fm->fc->async_read) {
919 ia->ff = fuse_file_get(ff);
920 ap->args.end = fuse_readpages_end;
921 err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
922 if (!err)
923 return;
924 } else {
925 res = fuse_simple_request(fm, &ap->args);
926 err = res < 0 ? res : 0;
927 }
928 fuse_readpages_end(fm, &ap->args, err);
929 }
930
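/*
 * ->readahead(): split the readahead window into requests of at most
 * max_pages pages each and send them, in the background if the server
 * supports asynchronous reads.
 */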
931 static void fuse_readahead(struct readahead_control *rac)
932 {
933 struct inode *inode = rac->mapping->host;
934 struct fuse_conn *fc = get_fuse_conn(inode);
935 unsigned int max_pages, nr_pages;
936 struct folio *folio = NULL;
937
938 if (fuse_is_bad(inode))
939 return;
940
941 max_pages = min_t(unsigned int, fc->max_pages,
942 fc->max_read / PAGE_SIZE);
943
944 /*
945 * This is only accurate the first time through, since readahead_folio()
946 * doesn't update readahead_count() from the previous folio until the
947 * next call. Grab nr_pages here so we know how many pages we're going
948 * to have to process. This means that we will exit here with
949 * readahead_count() == folio_nr_pages(last_folio), but we will have
950 * consumed all of the folios, and read_pages() will call
951 * readahead_folio() again which will clean up the rac.
952 */
953 nr_pages = readahead_count(rac);
954
955 while (nr_pages) {
956 struct fuse_io_args *ia;
957 struct fuse_args_pages *ap;
958 unsigned cur_pages = min(max_pages, nr_pages);
959 unsigned int pages = 0;
960
961 if (fc->num_background >= fc->congestion_threshold &&
962 rac->ra->async_size >= readahead_count(rac))
963 /*
964 * Congested and only async pages left, so skip the
965 * rest.
966 */
967 break;
968
969 ia = fuse_io_alloc(NULL, cur_pages);
970 if (!ia)
971 break;
972 ap = &ia->ap;
973
974 while (pages < cur_pages) {
975 unsigned int folio_pages;
976
977 /*
978 * This returns a folio with a ref held on it.
979 * The ref needs to be held until the request is
980 * completed, since the splice case (see
981 * fuse_try_move_page()) drops the ref after it's
982 * replaced in the page cache.
983 */
984 if (!folio)
985 folio = __readahead_folio(rac);
986
987 folio_pages = folio_nr_pages(folio);
988 if (folio_pages > cur_pages - pages) {
989 /*
990 * Large folios belonging to fuse will never
991 * have more pages than max_pages.
992 */
993 WARN_ON(!pages);
994 break;
995 }
996
997 ap->folios[ap->num_folios] = folio;
998 ap->descs[ap->num_folios].length = folio_size(folio);
999 ap->num_folios++;
1000 pages += folio_pages;
1001 folio = NULL;
1002 }
1003 fuse_send_readpages(ia, rac->file, pages << PAGE_SHIFT);
1004 nr_pages -= pages;
1005 }
1006 if (folio) {
1007 folio_end_read(folio, false);
1008 folio_put(folio);
1009 }
1010 }
1011
1012 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
1013 {
1014 struct inode *inode = iocb->ki_filp->f_mapping->host;
1015 struct fuse_conn *fc = get_fuse_conn(inode);
1016
1017 /*
1018 * In auto invalidate mode, always update attributes on read.
1019 * Otherwise, only update if we attempt to read past EOF (to ensure
1020 * i_size is up to date).
1021 */
1022 if (fc->auto_inval_data ||
1023 (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
1024 int err;
1025 err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
1026 if (err)
1027 return err;
1028 }
1029
1030 return generic_file_read_iter(iocb, to);
1031 }
1032
1033 static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
1034 loff_t pos, size_t count)
1035 {
1036 struct fuse_args *args = &ia->ap.args;
1037
1038 ia->write.in.fh = ff->fh;
1039 ia->write.in.offset = pos;
1040 ia->write.in.size = count;
1041 args->opcode = FUSE_WRITE;
1042 args->nodeid = ff->nodeid;
1043 args->in_numargs = 2;
1044 if (ff->fm->fc->minor < 9)
1045 args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
1046 else
1047 args->in_args[0].size = sizeof(ia->write.in);
1048 args->in_args[0].value = &ia->write.in;
1049 args->in_args[1].size = count;
1050 args->out_numargs = 1;
1051 args->out_args[0].size = sizeof(ia->write.out);
1052 args->out_args[0].value = &ia->write.out;
1053 }
1054
1055 static unsigned int fuse_write_flags(struct kiocb *iocb)
1056 {
1057 unsigned int flags = iocb->ki_filp->f_flags;
1058
1059 if (iocb_is_dsync(iocb))
1060 flags |= O_DSYNC;
1061 if (iocb->ki_flags & IOCB_SYNC)
1062 flags |= O_SYNC;
1063
1064 return flags;
1065 }
1066
1067 static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
1068 size_t count, fl_owner_t owner)
1069 {
1070 struct kiocb *iocb = ia->io->iocb;
1071 struct file *file = iocb->ki_filp;
1072 struct fuse_file *ff = file->private_data;
1073 struct fuse_mount *fm = ff->fm;
1074 struct fuse_write_in *inarg = &ia->write.in;
1075 ssize_t err;
1076
1077 fuse_write_args_fill(ia, ff, pos, count);
1078 inarg->flags = fuse_write_flags(iocb);
1079 if (owner != NULL) {
1080 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
1081 inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
1082 }
1083
1084 if (ia->io->async)
1085 return fuse_async_req_send(fm, ia, count);
1086
1087 err = fuse_simple_request(fm, &ia->ap.args);
1088 if (!err && ia->write.out.size > count)
1089 err = -EIO;
1090
1091 return err ?: ia->write.out.size;
1092 }
1093
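/*
 * Bump the attribute version and, if the write extended the file, update
 * i_size.  Returns true when i_size was changed.
 */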
1094 bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
1095 {
1096 struct fuse_conn *fc = get_fuse_conn(inode);
1097 struct fuse_inode *fi = get_fuse_inode(inode);
1098 bool ret = false;
1099
1100 spin_lock(&fi->lock);
1101 fi->attr_version = atomic64_inc_return(&fc->attr_version);
1102 if (written > 0 && pos > inode->i_size) {
1103 i_size_write(inode, pos);
1104 ret = true;
1105 }
1106 spin_unlock(&fi->lock);
1107
1108 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
1109
1110 return ret;
1111 }
1112
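/*
 * Send a single FUSE_WRITE covering the folios gathered by
 * fuse_fill_write_pages().  On a short write, folios that were not written
 * in full are marked not-uptodate again.
 */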
1113 static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
1114 struct kiocb *iocb, struct inode *inode,
1115 loff_t pos, size_t count)
1116 {
1117 struct fuse_args_pages *ap = &ia->ap;
1118 struct file *file = iocb->ki_filp;
1119 struct fuse_file *ff = file->private_data;
1120 struct fuse_mount *fm = ff->fm;
1121 unsigned int offset, i;
1122 bool short_write;
1123 int err;
1124
1125 for (i = 0; i < ap->num_folios; i++)
1126 folio_wait_writeback(ap->folios[i]);
1127
1128 fuse_write_args_fill(ia, ff, pos, count);
1129 ia->write.in.flags = fuse_write_flags(iocb);
1130 if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
1131 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1132
1133 err = fuse_simple_request(fm, &ap->args);
1134 if (!err && ia->write.out.size > count)
1135 err = -EIO;
1136
1137 short_write = ia->write.out.size < count;
1138 offset = ap->descs[0].offset;
1139 count = ia->write.out.size;
1140 for (i = 0; i < ap->num_folios; i++) {
1141 struct folio *folio = ap->folios[i];
1142
1143 if (err) {
1144 folio_clear_uptodate(folio);
1145 } else {
1146 if (count >= folio_size(folio) - offset)
1147 count -= folio_size(folio) - offset;
1148 else {
1149 if (short_write)
1150 folio_clear_uptodate(folio);
1151 count = 0;
1152 }
1153 offset = 0;
1154 }
1155 if (ia->write.folio_locked && (i == ap->num_folios - 1))
1156 folio_unlock(folio);
1157 folio_put(folio);
1158 }
1159
1160 return err;
1161 }
1162
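/*
 * Copy data from @ii into page-cache folios starting at @pos, until
 * @max_folios, fc->max_write or a partially filled folio ends the batch.
 * The folios stay referenced (the last one possibly locked) for
 * fuse_send_write_pages().
 */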
1163 static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
1164 struct address_space *mapping,
1165 struct iov_iter *ii, loff_t pos,
1166 unsigned int max_folios)
1167 {
1168 struct fuse_args_pages *ap = &ia->ap;
1169 struct fuse_conn *fc = get_fuse_conn(mapping->host);
1170 unsigned offset = pos & (PAGE_SIZE - 1);
1171 size_t count = 0;
1172 unsigned int num;
1173 int err = 0;
1174
1175 num = min(iov_iter_count(ii), fc->max_write);
1176
1177 ap->args.in_pages = true;
1178 ap->descs[0].offset = offset;
1179
1180 while (num && ap->num_folios < max_folios) {
1181 size_t tmp;
1182 struct folio *folio;
1183 pgoff_t index = pos >> PAGE_SHIFT;
1184 unsigned int bytes;
1185 unsigned int folio_offset;
1186
1187 again:
1188 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
1189 mapping_gfp_mask(mapping));
1190 if (IS_ERR(folio)) {
1191 err = PTR_ERR(folio);
1192 break;
1193 }
1194
1195 if (mapping_writably_mapped(mapping))
1196 flush_dcache_folio(folio);
1197
1198 folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
1199 bytes = min(folio_size(folio) - folio_offset, num);
1200
1201 tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
1202 flush_dcache_folio(folio);
1203
1204 if (!tmp) {
1205 folio_unlock(folio);
1206 folio_put(folio);
1207
1208 /*
1209 * Ensure forward progress by faulting in
1210 * while not holding the folio lock:
1211 */
1212 if (fault_in_iov_iter_readable(ii, bytes)) {
1213 err = -EFAULT;
1214 break;
1215 }
1216
1217 goto again;
1218 }
1219
1220 ap->folios[ap->num_folios] = folio;
1221 ap->descs[ap->num_folios].offset = folio_offset;
1222 ap->descs[ap->num_folios].length = tmp;
1223 ap->num_folios++;
1224
1225 count += tmp;
1226 pos += tmp;
1227 num -= tmp;
1228 offset += tmp;
1229 if (offset == folio_size(folio))
1230 offset = 0;
1231
1232 /* If we copied full folio, mark it uptodate */
1233 if (tmp == folio_size(folio))
1234 folio_mark_uptodate(folio);
1235
1236 if (folio_test_uptodate(folio)) {
1237 folio_unlock(folio);
1238 } else {
1239 ia->write.folio_locked = true;
1240 break;
1241 }
1242 if (!fc->big_writes || offset != 0)
1243 break;
1244 }
1245
1246 return count > 0 ? count : err;
1247 }
1248
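/*
 * Number of pages needed to cover the byte range [pos, pos + len), capped
 * at @max_pages.  For example, with 4k pages, pos = 4094 and len = 10 touch
 * two pages, so this returns 2.
 */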
1249 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
1250 unsigned int max_pages)
1251 {
1252 return min_t(unsigned int,
1253 ((pos + len - 1) >> PAGE_SHIFT) -
1254 (pos >> PAGE_SHIFT) + 1,
1255 max_pages);
1256 }
1257
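/*
 * Write-through path: copy the iterator into page-cache folios and push
 * them to the server with synchronous FUSE_WRITE requests, one batch per
 * loop iteration.
 */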
1258 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
1259 {
1260 struct address_space *mapping = iocb->ki_filp->f_mapping;
1261 struct inode *inode = mapping->host;
1262 struct fuse_conn *fc = get_fuse_conn(inode);
1263 struct fuse_inode *fi = get_fuse_inode(inode);
1264 loff_t pos = iocb->ki_pos;
1265 int err = 0;
1266 ssize_t res = 0;
1267
1268 if (inode->i_size < pos + iov_iter_count(ii))
1269 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1270
1271 do {
1272 ssize_t count;
1273 struct fuse_io_args ia = {};
1274 struct fuse_args_pages *ap = &ia.ap;
1275 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
1276 fc->max_pages);
1277
1278 ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs);
1279 if (!ap->folios) {
1280 err = -ENOMEM;
1281 break;
1282 }
1283
1284 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
1285 if (count <= 0) {
1286 err = count;
1287 } else {
1288 err = fuse_send_write_pages(&ia, iocb, inode,
1289 pos, count);
1290 if (!err) {
1291 size_t num_written = ia.write.out.size;
1292
1293 res += num_written;
1294 pos += num_written;
1295
1296 /* break out of the loop on short write */
1297 if (num_written != count)
1298 err = -EIO;
1299 }
1300 }
1301 kfree(ap->folios);
1302 } while (!err && iov_iter_count(ii));
1303
1304 fuse_write_update_attr(inode, pos, res);
1305 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1306
1307 if (!res)
1308 return err;
1309 iocb->ki_pos += res;
1310 return res;
1311 }
1312
1313 static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
1314 {
1315 struct inode *inode = file_inode(iocb->ki_filp);
1316
1317 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
1318 }
1319
1320 /*
1321 * @return true if an exclusive lock for direct IO writes is needed
1322 */
1323 static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
1324 {
1325 struct file *file = iocb->ki_filp;
1326 struct fuse_file *ff = file->private_data;
1327 struct inode *inode = file_inode(iocb->ki_filp);
1328 struct fuse_inode *fi = get_fuse_inode(inode);
1329
1330 /* Server side has to advise that it supports parallel dio writes. */
1331 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
1332 return true;
1333
1334 /*
1335 * Append will need to know the eventual EOF - always needs an
1336 * exclusive lock.
1337 */
1338 if (iocb->ki_flags & IOCB_APPEND)
1339 return true;
1340
1341 /* shared locks are not allowed with parallel page cache IO */
1342 if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
1343 return true;
1344
1345 /* Parallel dio beyond EOF is not supported, at least for now. */
1346 if (fuse_io_past_eof(iocb, from))
1347 return true;
1348
1349 return false;
1350 }
1351
1352 static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
1353 bool *exclusive)
1354 {
1355 struct inode *inode = file_inode(iocb->ki_filp);
1356 struct fuse_inode *fi = get_fuse_inode(inode);
1357
1358 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
1359 if (*exclusive) {
1360 inode_lock(inode);
1361 } else {
1362 inode_lock_shared(inode);
1363 /*
1364 * New parallel dio is allowed only if the inode is not in caching
1365 * mode and denies new opens in caching mode. This check
1366 * should be performed only after taking shared inode lock.
1367 * Previous past eof check was without inode lock and might
1368 * have raced, so check it again.
1369 */
1370 if (fuse_io_past_eof(iocb, from) ||
1371 fuse_inode_uncached_io_start(fi, NULL) != 0) {
1372 inode_unlock_shared(inode);
1373 inode_lock(inode);
1374 *exclusive = true;
1375 }
1376 }
1377 }
1378
1379 static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
1380 {
1381 struct inode *inode = file_inode(iocb->ki_filp);
1382 struct fuse_inode *fi = get_fuse_inode(inode);
1383
1384 if (exclusive) {
1385 inode_unlock(inode);
1386 } else {
1387 /* Allow opens in caching mode after last parallel dio end */
1388 fuse_inode_uncached_io_end(fi);
1389 inode_unlock_shared(inode);
1390 }
1391 }
1392
1393 static const struct iomap_write_ops fuse_iomap_write_ops = {
1394 .read_folio_range = fuse_iomap_read_folio_range,
1395 };
1396
1397 static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
1398 unsigned int flags, struct iomap *iomap,
1399 struct iomap *srcmap)
1400 {
1401 iomap->type = IOMAP_MAPPED;
1402 iomap->length = length;
1403 iomap->offset = offset;
1404 return 0;
1405 }
1406
1407 static const struct iomap_ops fuse_iomap_ops = {
1408 .iomap_begin = fuse_iomap_begin,
1409 };
1410
1411 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1412 {
1413 struct file *file = iocb->ki_filp;
1414 struct mnt_idmap *idmap = file_mnt_idmap(file);
1415 struct address_space *mapping = file->f_mapping;
1416 ssize_t written = 0;
1417 struct inode *inode = mapping->host;
1418 ssize_t err, count;
1419 struct fuse_conn *fc = get_fuse_conn(inode);
1420 bool writeback = false;
1421
1422 if (fc->writeback_cache) {
1423 /* Update size (EOF optimization) and mode (SUID clearing) */
1424 err = fuse_update_attributes(mapping->host, file,
1425 STATX_SIZE | STATX_MODE);
1426 if (err)
1427 return err;
1428
1429 if (!fc->handle_killpriv_v2 ||
1430 !setattr_should_drop_suidgid(idmap, file_inode(file)))
1431 writeback = true;
1432 }
1433
1434 inode_lock(inode);
1435
1436 err = count = generic_write_checks(iocb, from);
1437 if (err <= 0)
1438 goto out;
1439
1440 task_io_account_write(count);
1441
1442 err = kiocb_modified(iocb);
1443 if (err)
1444 goto out;
1445
1446 if (iocb->ki_flags & IOCB_DIRECT) {
1447 written = generic_file_direct_write(iocb, from);
1448 if (written < 0 || !iov_iter_count(from))
1449 goto out;
1450 written = direct_write_fallback(iocb, from, written,
1451 fuse_perform_write(iocb, from));
1452 } else if (writeback) {
1453 /*
1454 * Use iomap so that we can do granular uptodate reads
1455 * and granular dirty tracking for large folios.
1456 */
1457 written = iomap_file_buffered_write(iocb, from,
1458 &fuse_iomap_ops,
1459 &fuse_iomap_write_ops,
1460 file);
1461 } else {
1462 written = fuse_perform_write(iocb, from);
1463 }
1464 out:
1465 inode_unlock(inode);
1466 if (written > 0)
1467 written = generic_write_sync(iocb, written);
1468
1469 return written ? written : err;
1470 }
1471
1472 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
1473 {
1474 return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
1475 }
1476
1477 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
1478 size_t max_size)
1479 {
1480 return min(iov_iter_single_seg_count(ii), max_size);
1481 }
1482
1483 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
1484 size_t *nbytesp, int write,
1485 unsigned int max_pages,
1486 bool use_pages_for_kvec_io)
1487 {
1488 bool flush_or_invalidate = false;
1489 unsigned int nr_pages = 0;
1490 size_t nbytes = 0; /* # bytes already packed in req */
1491 ssize_t ret = 0;
1492
1493 /* Special case for kernel I/O: can copy directly into the buffer.
1494 * However, if the implementation of fuse_conn requires pages instead of a
1495 * pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead.
1496 */
1497 if (iov_iter_is_kvec(ii)) {
1498 void *user_addr = (void *)fuse_get_user_addr(ii);
1499
1500 if (!use_pages_for_kvec_io) {
1501 size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
1502
1503 if (write)
1504 ap->args.in_args[1].value = user_addr;
1505 else
1506 ap->args.out_args[0].value = user_addr;
1507
1508 iov_iter_advance(ii, frag_size);
1509 *nbytesp = frag_size;
1510 return 0;
1511 }
1512
1513 if (is_vmalloc_addr(user_addr)) {
1514 ap->args.vmap_base = user_addr;
1515 flush_or_invalidate = true;
1516 }
1517 }
1518
1519 /*
1520 * Until there is support for iov_iter_extract_folios(), we have to
1521 * manually extract pages using iov_iter_extract_pages() and then
1522 * copy that to a folios array.
1523 */
1524 struct page **pages = kzalloc(max_pages * sizeof(struct page *),
1525 GFP_KERNEL);
1526 if (!pages) {
1527 ret = -ENOMEM;
1528 goto out;
1529 }
1530
1531 while (nbytes < *nbytesp && nr_pages < max_pages) {
1532 unsigned nfolios, i;
1533 size_t start;
1534
1535 ret = iov_iter_extract_pages(ii, &pages,
1536 *nbytesp - nbytes,
1537 max_pages - nr_pages,
1538 0, &start);
1539 if (ret < 0)
1540 break;
1541
1542 nbytes += ret;
1543
1544 nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
1545
1546 for (i = 0; i < nfolios; i++) {
1547 struct folio *folio = page_folio(pages[i]);
1548 unsigned int offset = start +
1549 (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
1550 unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start);
1551
1552 ap->descs[ap->num_folios].offset = offset;
1553 ap->descs[ap->num_folios].length = len;
1554 ap->folios[ap->num_folios] = folio;
1555 start = 0;
1556 ret -= len;
1557 ap->num_folios++;
1558 }
1559
1560 nr_pages += nfolios;
1561 }
1562 kfree(pages);
1563
1564 if (write && flush_or_invalidate)
1565 flush_kernel_vmap_range(ap->args.vmap_base, nbytes);
1566
1567 ap->args.invalidate_vmap = !write && flush_or_invalidate;
1568 ap->args.is_pinned = iov_iter_extract_will_pin(ii);
1569 ap->args.user_pages = true;
1570 if (write)
1571 ap->args.in_pages = true;
1572 else
1573 ap->args.out_pages = true;
1574
1575 out:
1576 *nbytesp = nbytes;
1577
1578 return ret < 0 ? ret : 0;
1579 }
1580
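/*
 * Core of the direct I/O path: split @iter into chunks of at most
 * max_read/max_write bytes, pack the pages into a fuse_args_pages and send
 * the READ/WRITE requests, synchronously or (for async io) in the
 * background.
 */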
1581 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1582 loff_t *ppos, int flags)
1583 {
1584 int write = flags & FUSE_DIO_WRITE;
1585 int cuse = flags & FUSE_DIO_CUSE;
1586 struct file *file = io->iocb->ki_filp;
1587 struct address_space *mapping = file->f_mapping;
1588 struct inode *inode = mapping->host;
1589 struct fuse_file *ff = file->private_data;
1590 struct fuse_conn *fc = ff->fm->fc;
1591 size_t nmax = write ? fc->max_write : fc->max_read;
1592 loff_t pos = *ppos;
1593 size_t count = iov_iter_count(iter);
1594 pgoff_t idx_from = pos >> PAGE_SHIFT;
1595 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1596 ssize_t res = 0;
1597 int err = 0;
1598 struct fuse_io_args *ia;
1599 unsigned int max_pages;
1600 bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
1601
1602 max_pages = iov_iter_npages(iter, fc->max_pages);
1603 ia = fuse_io_alloc(io, max_pages);
1604 if (!ia)
1605 return -ENOMEM;
1606
1607 if (fopen_direct_io && fc->direct_io_allow_mmap) {
1608 res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
1609 if (res) {
1610 fuse_io_free(ia);
1611 return res;
1612 }
1613 }
1614 if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) {
1615 if (!write)
1616 inode_lock(inode);
1617 fuse_sync_writes(inode);
1618 if (!write)
1619 inode_unlock(inode);
1620 }
1621
1622 if (fopen_direct_io && write) {
1623 res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
1624 if (res) {
1625 fuse_io_free(ia);
1626 return res;
1627 }
1628 }
1629
1630 io->should_dirty = !write && user_backed_iter(iter);
1631 while (count) {
1632 ssize_t nres;
1633 fl_owner_t owner = current->files;
1634 size_t nbytes = min(count, nmax);
1635
1636 err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
1637 max_pages, fc->use_pages_for_kvec_io);
1638 if (err && !nbytes)
1639 break;
1640
1641 if (write) {
1642 if (!capable(CAP_FSETID))
1643 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1644
1645 nres = fuse_send_write(ia, pos, nbytes, owner);
1646 } else {
1647 nres = fuse_send_read(ia, pos, nbytes, owner);
1648 }
1649
1650 if (!io->async || nres < 0) {
1651 fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
1652 fuse_io_free(ia);
1653 }
1654 ia = NULL;
1655 if (nres < 0) {
1656 iov_iter_revert(iter, nbytes);
1657 err = nres;
1658 break;
1659 }
1660 WARN_ON(nres > nbytes);
1661
1662 count -= nres;
1663 res += nres;
1664 pos += nres;
1665 if (nres != nbytes) {
1666 iov_iter_revert(iter, nbytes - nres);
1667 break;
1668 }
1669 if (count) {
1670 max_pages = iov_iter_npages(iter, fc->max_pages);
1671 ia = fuse_io_alloc(io, max_pages);
1672 if (!ia)
1673 break;
1674 }
1675 }
1676 if (ia)
1677 fuse_io_free(ia);
1678 if (res > 0)
1679 *ppos = pos;
1680
1681 return res > 0 ? res : err;
1682 }
1683 EXPORT_SYMBOL_GPL(fuse_direct_io);
1684
1685 static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
1686 struct iov_iter *iter,
1687 loff_t *ppos)
1688 {
1689 ssize_t res;
1690 struct inode *inode = file_inode(io->iocb->ki_filp);
1691
1692 res = fuse_direct_io(io, iter, ppos, 0);
1693
1694 fuse_invalidate_atime(inode);
1695
1696 return res;
1697 }
1698
1699 static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
1700
1701 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
1702 {
1703 ssize_t res;
1704
1705 if (!is_sync_kiocb(iocb)) {
1706 res = fuse_direct_IO(iocb, to);
1707 } else {
1708 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1709
1710 res = __fuse_direct_read(&io, to, &iocb->ki_pos);
1711 }
1712
1713 return res;
1714 }
1715
1716 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
1717 {
1718 struct inode *inode = file_inode(iocb->ki_filp);
1719 ssize_t res;
1720 bool exclusive;
1721
1722 fuse_dio_lock(iocb, from, &exclusive);
1723 res = generic_write_checks(iocb, from);
1724 if (res > 0) {
1725 task_io_account_write(res);
1726 if (!is_sync_kiocb(iocb)) {
1727 res = fuse_direct_IO(iocb, from);
1728 } else {
1729 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1730
1731 res = fuse_direct_io(&io, from, &iocb->ki_pos,
1732 FUSE_DIO_WRITE);
1733 fuse_write_update_attr(inode, iocb->ki_pos, res);
1734 }
1735 }
1736 fuse_dio_unlock(iocb, exclusive);
1737
1738 return res;
1739 }
1740
1741 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1742 {
1743 struct file *file = iocb->ki_filp;
1744 struct fuse_file *ff = file->private_data;
1745 struct inode *inode = file_inode(file);
1746
1747 if (fuse_is_bad(inode))
1748 return -EIO;
1749
1750 if (FUSE_IS_DAX(inode))
1751 return fuse_dax_read_iter(iocb, to);
1752
1753 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1754 if (ff->open_flags & FOPEN_DIRECT_IO)
1755 return fuse_direct_read_iter(iocb, to);
1756 else if (fuse_file_passthrough(ff))
1757 return fuse_passthrough_read_iter(iocb, to);
1758 else
1759 return fuse_cache_read_iter(iocb, to);
1760 }
1761
1762 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1763 {
1764 struct file *file = iocb->ki_filp;
1765 struct fuse_file *ff = file->private_data;
1766 struct inode *inode = file_inode(file);
1767
1768 if (fuse_is_bad(inode))
1769 return -EIO;
1770
1771 if (FUSE_IS_DAX(inode))
1772 return fuse_dax_write_iter(iocb, from);
1773
1774 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1775 if (ff->open_flags & FOPEN_DIRECT_IO)
1776 return fuse_direct_write_iter(iocb, from);
1777 else if (fuse_file_passthrough(ff))
1778 return fuse_passthrough_write_iter(iocb, from);
1779 else
1780 return fuse_cache_write_iter(iocb, from);
1781 }
1782
1783 static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
1784 struct pipe_inode_info *pipe, size_t len,
1785 unsigned int flags)
1786 {
1787 struct fuse_file *ff = in->private_data;
1788
1789 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1790 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1791 return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
1792 else
1793 return filemap_splice_read(in, ppos, pipe, len, flags);
1794 }
1795
1796 static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
1797 loff_t *ppos, size_t len, unsigned int flags)
1798 {
1799 struct fuse_file *ff = out->private_data;
1800
1801 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1802 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1803 return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
1804 else
1805 return iter_file_splice_write(pipe, out, ppos, len, flags);
1806 }
1807
1808 static void fuse_writepage_free(struct fuse_writepage_args *wpa)
1809 {
1810 struct fuse_args_pages *ap = &wpa->ia.ap;
1811
1812 if (wpa->bucket)
1813 fuse_sync_bucket_dec(wpa->bucket);
1814
1815 fuse_file_put(wpa->ia.ff, false);
1816
1817 kfree(ap->folios);
1818 kfree(wpa);
1819 }
1820
1821 static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
1822 {
1823 struct fuse_args_pages *ap = &wpa->ia.ap;
1824 struct inode *inode = wpa->inode;
1825 struct fuse_inode *fi = get_fuse_inode(inode);
1826 struct backing_dev_info *bdi = inode_to_bdi(inode);
1827 int i;
1828
1829 for (i = 0; i < ap->num_folios; i++) {
1830 /*
1831 * Benchmarks showed that ending writeback within the
1832 * scope of the fi->lock alleviates xarray lock
1833 * contention and noticeably improves performance.
1834 */
1835 iomap_finish_folio_write(inode, ap->folios[i], 1);
1836 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
1837 wb_writeout_inc(&bdi->wb);
1838 }
1839
1840 wake_up(&fi->page_waitq);
1841 }
1842
1843 /* Called under fi->lock, may release and reacquire it */
1844 static void fuse_send_writepage(struct fuse_mount *fm,
1845 struct fuse_writepage_args *wpa, loff_t size)
1846 __releases(fi->lock)
1847 __acquires(fi->lock)
1848 {
1849 struct fuse_inode *fi = get_fuse_inode(wpa->inode);
1850 struct fuse_args_pages *ap = &wpa->ia.ap;
1851 struct fuse_write_in *inarg = &wpa->ia.write.in;
1852 struct fuse_args *args = &ap->args;
1853 __u64 data_size = 0;
1854 int err, i;
1855
1856 for (i = 0; i < ap->num_folios; i++)
1857 data_size += ap->descs[i].length;
1858
1859 fi->writectr++;
1860 if (inarg->offset + data_size <= size) {
1861 inarg->size = data_size;
1862 } else if (inarg->offset < size) {
1863 inarg->size = size - inarg->offset;
1864 } else {
1865 /* Got truncated off completely */
1866 goto out_free;
1867 }
1868
1869 args->in_args[1].size = inarg->size;
1870 args->force = true;
1871 args->nocreds = true;
1872
1873 err = fuse_simple_background(fm, args, GFP_ATOMIC);
1874 if (err == -ENOMEM) {
1875 spin_unlock(&fi->lock);
1876 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
1877 spin_lock(&fi->lock);
1878 }
1879
1880 /* Fails on broken connection only */
1881 if (unlikely(err))
1882 goto out_free;
1883
1884 return;
1885
1886 out_free:
1887 fi->writectr--;
1888 fuse_writepage_finish(wpa);
1889 spin_unlock(&fi->lock);
1890 fuse_writepage_free(wpa);
1891 spin_lock(&fi->lock);
1892 }
1893
1894 /*
1895 * If fi->writectr is non-negative (no truncate or fsync going on) send
1896 * all queued writepage requests.
1897 *
1898 * Called with fi->lock held
1899 */
1900 void fuse_flush_writepages(struct inode *inode)
1901 __releases(fi->lock)
1902 __acquires(fi->lock)
1903 {
1904 struct fuse_mount *fm = get_fuse_mount(inode);
1905 struct fuse_inode *fi = get_fuse_inode(inode);
1906 loff_t crop = i_size_read(inode);
1907 struct fuse_writepage_args *wpa;
1908
1909 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1910 wpa = list_entry(fi->queued_writes.next,
1911 struct fuse_writepage_args, queue_entry);
1912 list_del_init(&wpa->queue_entry);
1913 fuse_send_writepage(fm, wpa, crop);
1914 }
1915 }
1916
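/* Completion callback for a background FUSE_WRITE request issued by writeback */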
1917 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
1918 int error)
1919 {
1920 struct fuse_writepage_args *wpa =
1921 container_of(args, typeof(*wpa), ia.ap.args);
1922 struct inode *inode = wpa->inode;
1923 struct fuse_inode *fi = get_fuse_inode(inode);
1924 struct fuse_conn *fc = get_fuse_conn(inode);
1925
1926 mapping_set_error(inode->i_mapping, error);
1927 /*
1928 * A writeback finished and this might have updated mtime/ctime on
1929 * server making local mtime/ctime stale. Hence invalidate attrs.
1930 * Do this only if writeback_cache is not enabled. If writeback_cache
1931 * is enabled, we trust local ctime/mtime.
1932 */
1933 if (!fc->writeback_cache)
1934 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
1935 spin_lock(&fi->lock);
1936 fi->writectr--;
1937 fuse_writepage_finish(wpa);
1938 spin_unlock(&fi->lock);
1939 fuse_writepage_free(wpa);
1940 }
1941
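/* Take a reference to the first file on the inode's write_files list, if any */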
1942 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
1943 {
1944 struct fuse_file *ff;
1945
1946 spin_lock(&fi->lock);
1947 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
1948 write_entry);
1949 if (ff)
1950 fuse_file_get(ff);
1951 spin_unlock(&fi->lock);
1952
1953 return ff;
1954 }
1955
1956 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
1957 {
1958 struct fuse_file *ff = __fuse_write_file_get(fi);
1959 WARN_ON(!ff);
1960 return ff;
1961 }
1962
1963 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
1964 {
1965 struct fuse_inode *fi = get_fuse_inode(inode);
1966 struct fuse_file *ff;
1967 int err;
1968
1969 /*
1970 * Inode is always written before the last reference is dropped and
1971 * hence this should not be reached from reclaim.
1972 *
1973 * Writing back the inode from reclaim can deadlock if the request
1974 * processing itself needs an allocation. Allocations triggering
1975 * reclaim while serving a request can't be prevented, because it can
1976 * involve any number of unrelated userspace processes.
1977 */
1978 WARN_ON(wbc->for_reclaim);
1979
1980 ff = __fuse_write_file_get(fi);
1981 err = fuse_flush_times(inode, ff);
1982 if (ff)
1983 fuse_file_put(ff, false);
1984
1985 return err;
1986 }
1987
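/* Allocate a writepage request with room for a single folio */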
1988 static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
1989 {
1990 struct fuse_writepage_args *wpa;
1991 struct fuse_args_pages *ap;
1992
1993 wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
1994 if (wpa) {
1995 ap = &wpa->ia.ap;
1996 ap->num_folios = 0;
1997 ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs);
1998 if (!ap->folios) {
1999 kfree(wpa);
2000 wpa = NULL;
2001 }
2002 }
2003 return wpa;
2005 }
2006
2007 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
2008 struct fuse_writepage_args *wpa)
2009 {
2010 if (!fc->sync_fs)
2011 return;
2012
2013 rcu_read_lock();
2014 /* Prevent resurrection of dead bucket in unlikely race with syncfs */
2015 do {
2016 wpa->bucket = rcu_dereference(fc->curr_bucket);
2017 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
2018 rcu_read_unlock();
2019 }
2020
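/* Record one folio range in the request and account it as under writeback */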
2021 static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
2022 uint32_t folio_index, loff_t offset, unsigned len)
2023 {
2024 struct inode *inode = folio->mapping->host;
2025 struct fuse_args_pages *ap = &wpa->ia.ap;
2026
2027 ap->folios[folio_index] = folio;
2028 ap->descs[folio_index].offset = offset;
2029 ap->descs[folio_index].length = len;
2030
2031 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
2032 }
2033
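/*
 * Allocate and initialize a cached-write (FUSE_WRITE_CACHE) request starting
 * at the given offset within the folio.
 */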
2034 static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
2035 size_t offset,
2036 struct fuse_file *ff)
2037 {
2038 struct inode *inode = folio->mapping->host;
2039 struct fuse_conn *fc = get_fuse_conn(inode);
2040 struct fuse_writepage_args *wpa;
2041 struct fuse_args_pages *ap;
2042
2043 wpa = fuse_writepage_args_alloc();
2044 if (!wpa)
2045 return NULL;
2046
2047 fuse_writepage_add_to_bucket(fc, wpa);
2048 fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0);
2049 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2050 wpa->inode = inode;
2051 wpa->ia.ff = ff;
2052
2053 ap = &wpa->ia.ap;
2054 ap->args.in_pages = true;
2055 ap->args.end = fuse_writepage_end;
2056
2057 return wpa;
2058 }
2059
2060 struct fuse_fill_wb_data {
2061 struct fuse_writepage_args *wpa;
2062 struct fuse_file *ff;
2063 unsigned int max_folios;
2064 /*
2065 * nr_bytes won't overflow since fuse_writepage_need_send() caps
2066 * wb requests to never exceed fc->max_pages (which has an upper bound
2067 * of U16_MAX).
2068 */
2069 unsigned int nr_bytes;
2070 };
2071
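/*
 * Grow the folio and descriptor arrays (capped at fc->max_pages), copying
 * over the entries gathered so far.
 */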
2072 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data,
2073 unsigned int max_pages)
2074 {
2075 struct fuse_args_pages *ap = &data->wpa->ia.ap;
2076 struct folio **folios;
2077 struct fuse_folio_desc *descs;
2078 unsigned int nfolios = min_t(unsigned int,
2079 max_t(unsigned int, data->max_folios * 2,
2080 FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2081 max_pages);
2082 WARN_ON(nfolios <= data->max_folios);
2083
2084 folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);
2085 if (!folios)
2086 return false;
2087
2088 memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios);
2089 memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios);
2090 kfree(ap->folios);
2091 ap->folios = folios;
2092 ap->descs = descs;
2093 data->max_folios = nfolios;
2094
2095 return true;
2096 }
2097
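/* Queue the accumulated request and flush the inode's queued writepages */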
2098 static void fuse_writepages_send(struct inode *inode,
2099 struct fuse_fill_wb_data *data)
2100 {
2101 struct fuse_writepage_args *wpa = data->wpa;
2102 struct fuse_inode *fi = get_fuse_inode(inode);
2103
2104 spin_lock(&fi->lock);
2105 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
2106 fuse_flush_writepages(inode);
2107 spin_unlock(&fi->lock);
2108 }
2109
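/*
 * Decide whether the pending request must be sent before this range can be
 * appended: size limits reached, discontiguous position, or the folio array
 * could not be grown.
 */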
2110 static bool fuse_writepage_need_send(struct fuse_conn *fc, loff_t pos,
2111 unsigned len, struct fuse_args_pages *ap,
2112 struct fuse_fill_wb_data *data)
2113 {
2114 struct folio *prev_folio;
2115 struct fuse_folio_desc prev_desc;
2116 unsigned bytes = data->nr_bytes + len;
2117 loff_t prev_pos;
2118
2119 WARN_ON(!ap->num_folios);
2120
2121 /* Reached max pages */
2122 if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
2123 return true;
2124
2125 /* Reached max write bytes */
2126 if (bytes > fc->max_write)
2127 return true;
2128
2129 /* Discontinuity */
2130 prev_folio = ap->folios[ap->num_folios - 1];
2131 prev_desc = ap->descs[ap->num_folios - 1];
2132 prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length;
2133 if (prev_pos != pos)
2134 return true;
2135
2136 /* Need to grow the pages array? If so, did the expansion fail? */
2137 if (ap->num_folios == data->max_folios &&
2138 !fuse_pages_realloc(data, fc->max_pages))
2139 return true;
2140
2141 return false;
2142 }
2143
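/*
 * iomap ->writeback_range callback: append a folio range to the pending
 * request, starting a new request when necessary.
 */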
2144 static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc,
2145 struct folio *folio, u64 pos,
2146 unsigned len, u64 end_pos)
2147 {
2148 struct fuse_fill_wb_data *data = wpc->wb_ctx;
2149 struct fuse_writepage_args *wpa = data->wpa;
2150 struct fuse_args_pages *ap = &wpa->ia.ap;
2151 struct inode *inode = wpc->inode;
2152 struct fuse_inode *fi = get_fuse_inode(inode);
2153 struct fuse_conn *fc = get_fuse_conn(inode);
2154 loff_t offset = offset_in_folio(folio, pos);
2155
2156 WARN_ON_ONCE(!data);
2157
2158 if (!data->ff) {
2159 data->ff = fuse_write_file_get(fi);
2160 if (!data->ff)
2161 return -EIO;
2162 }
2163
2164 if (wpa && fuse_writepage_need_send(fc, pos, len, ap, data)) {
2165 fuse_writepages_send(inode, data);
2166 data->wpa = NULL;
2167 data->nr_bytes = 0;
2168 }
2169
2170 if (data->wpa == NULL) {
2171 wpa = fuse_writepage_args_setup(folio, offset, data->ff);
2172 if (!wpa)
2173 return -ENOMEM;
2174 fuse_file_get(wpa->ia.ff);
2175 data->max_folios = 1;
2176 ap = &wpa->ia.ap;
2177 }
2178
2179 iomap_start_folio_write(inode, folio, 1);
2180 fuse_writepage_args_page_fill(wpa, folio, ap->num_folios,
2181 offset, len);
2182 data->nr_bytes += len;
2183
2184 ap->num_folios++;
2185 if (!data->wpa)
2186 data->wpa = wpa;
2187
2188 return len;
2189 }
2190
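/*
 * iomap ->writeback_submit callback: send any remaining request and drop the
 * writeback file reference.
 */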
2191 static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc,
2192 int error)
2193 {
2194 struct fuse_fill_wb_data *data = wpc->wb_ctx;
2195
2196 WARN_ON_ONCE(!data);
2197
2198 if (data->wpa) {
2199 WARN_ON(!data->wpa->ia.ap.num_folios);
2200 fuse_writepages_send(wpc->inode, data);
2201 }
2202
2203 if (data->ff)
2204 fuse_file_put(data->ff, false);
2205
2206 return error;
2207 }
2208
2209 static const struct iomap_writeback_ops fuse_writeback_ops = {
2210 .writeback_range = fuse_iomap_writeback_range,
2211 .writeback_submit = fuse_iomap_writeback_submit,
2212 };
2213
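/*
 * ->writepages: write back dirty folios through iomap, skipping background
 * writeback while the connection is congested.
 */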
2214 static int fuse_writepages(struct address_space *mapping,
2215 struct writeback_control *wbc)
2216 {
2217 struct inode *inode = mapping->host;
2218 struct fuse_conn *fc = get_fuse_conn(inode);
2219 struct fuse_fill_wb_data data = {};
2220 struct iomap_writepage_ctx wpc = {
2221 .inode = inode,
2222 .iomap.type = IOMAP_MAPPED,
2223 .wbc = wbc,
2224 .ops = &fuse_writeback_ops,
2225 .wb_ctx = &data,
2226 };
2227
2228 if (fuse_is_bad(inode))
2229 return -EIO;
2230
2231 if (wbc->sync_mode == WB_SYNC_NONE &&
2232 fc->num_background >= fc->congestion_threshold)
2233 return 0;
2234
2235 return iomap_writepages(&wpc);
2236 }
2237
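/* Synchronously write back a single dirty folio before it is invalidated */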
2238 static int fuse_launder_folio(struct folio *folio)
2239 {
2240 int err = 0;
2241 struct fuse_fill_wb_data data = {};
2242 struct iomap_writepage_ctx wpc = {
2243 .inode = folio->mapping->host,
2244 .iomap.type = IOMAP_MAPPED,
2245 .ops = &fuse_writeback_ops,
2246 .wb_ctx = &data,
2247 };
2248
2249 if (folio_clear_dirty_for_io(folio)) {
2250 err = iomap_writeback_folio(&wpc, folio);
2251 err = fuse_iomap_writeback_submit(&wpc, err);
2252 if (!err)
2253 folio_wait_writeback(folio);
2254 }
2255 return err;
2256 }
2257
2258 /*
2259 * Write back dirty data/metadata now (there may not be any suitable
2260 * open files later for data)
2261 */
2262 static void fuse_vma_close(struct vm_area_struct *vma)
2263 {
2264 int err;
2265
2266 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2267 mapping_set_error(vma->vm_file->f_mapping, err);
2268 }
2269
2270 /*
2271 * Wait for writeback against this page to complete before allowing it
2272 * to be marked dirty again, and hence written back again, possibly
2273 * before the previous writepage completed.
2274 *
2275 * Block here, instead of in ->writepage(), so that the userspace fs
2276 * can only block processes actually operating on the filesystem.
2277 *
2278 * Otherwise unprivileged userspace fs would be able to block
2279 * unrelated:
2280 *
2281 * - page migration
2282 * - sync(2)
2283 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2284 */
2285 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
2286 {
2287 struct folio *folio = page_folio(vmf->page);
2288 struct inode *inode = file_inode(vmf->vma->vm_file);
2289
2290 file_update_time(vmf->vma->vm_file);
2291 folio_lock(folio);
2292 if (folio->mapping != inode->i_mapping) {
2293 folio_unlock(folio);
2294 return VM_FAULT_NOPAGE;
2295 }
2296
2297 folio_wait_writeback(folio);
2298 return VM_FAULT_LOCKED;
2299 }
2300
2301 static const struct vm_operations_struct fuse_file_vm_ops = {
2302 .close = fuse_vma_close,
2303 .fault = filemap_fault,
2304 .map_pages = filemap_map_pages,
2305 .page_mkwrite = fuse_page_mkwrite,
2306 };
2307
2308 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2309 {
2310 struct fuse_file *ff = file->private_data;
2311 struct fuse_conn *fc = ff->fm->fc;
2312 struct inode *inode = file_inode(file);
2313 int rc;
2314
2315 /* DAX mmap is superior to direct_io mmap */
2316 if (FUSE_IS_DAX(inode))
2317 return fuse_dax_mmap(file, vma);
2318
2319 /*
2320 * If the inode is in passthrough io mode (it has some file open in
2321 * passthrough mode), either mmap to the backing file or fail the mmap,
2322 * because mixing cached mmap and passthrough io mode is not allowed.
2323 */
2324 if (fuse_file_passthrough(ff))
2325 return fuse_passthrough_mmap(file, vma);
2326 else if (fuse_inode_backing(get_fuse_inode(inode)))
2327 return -ENODEV;
2328
2329 /*
2330 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
2331 * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
2332 */
2333 if (ff->open_flags & FOPEN_DIRECT_IO) {
2334 /*
2335 * Can't provide the coherency needed for MAP_SHARED
2336 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
2337 */
2338 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
2339 return -ENODEV;
2340
2341 invalidate_inode_pages2(file->f_mapping);
2342
2343 if (!(vma->vm_flags & VM_MAYSHARE)) {
2344 /* MAP_PRIVATE */
2345 return generic_file_mmap(file, vma);
2346 }
2347
2348 /*
2349 * First mmap of direct_io file enters caching inode io mode.
2350 * Also waits for parallel dio writers to go into serial mode
2351 * (exclusive instead of shared lock).
2352 * After first mmap, the inode stays in caching io mode until
2353 * the direct_io file release.
2354 */
2355 rc = fuse_file_cached_io_open(inode, ff);
2356 if (rc)
2357 return rc;
2358 }
2359
2360 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2361 fuse_link_write_file(file);
2362
2363 file_accessed(file);
2364 vma->vm_ops = &fuse_file_vm_ops;
2365 return 0;
2366 }
2367
2368 static int convert_fuse_file_lock(struct fuse_conn *fc,
2369 const struct fuse_file_lock *ffl,
2370 struct file_lock *fl)
2371 {
2372 switch (ffl->type) {
2373 case F_UNLCK:
2374 break;
2375
2376 case F_RDLCK:
2377 case F_WRLCK:
2378 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2379 ffl->end < ffl->start)
2380 return -EIO;
2381
2382 fl->fl_start = ffl->start;
2383 fl->fl_end = ffl->end;
2384
2385 /*
2386 * Convert pid into init's pid namespace. The locks API will
2387 * translate it into the caller's pid namespace.
2388 */
2389 rcu_read_lock();
2390 fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
2391 rcu_read_unlock();
2392 break;
2393
2394 default:
2395 return -EIO;
2396 }
2397 fl->c.flc_type = ffl->type;
2398 return 0;
2399 }
2400
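/* Fill a FUSE_GETLK/FUSE_SETLK(W) request from a kernel file_lock */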
2401 static void fuse_lk_fill(struct fuse_args *args, struct file *file,
2402 const struct file_lock *fl, int opcode, pid_t pid,
2403 int flock, struct fuse_lk_in *inarg)
2404 {
2405 struct inode *inode = file_inode(file);
2406 struct fuse_conn *fc = get_fuse_conn(inode);
2407 struct fuse_file *ff = file->private_data;
2408
2409 memset(inarg, 0, sizeof(*inarg));
2410 inarg->fh = ff->fh;
2411 inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
2412 inarg->lk.start = fl->fl_start;
2413 inarg->lk.end = fl->fl_end;
2414 inarg->lk.type = fl->c.flc_type;
2415 inarg->lk.pid = pid;
2416 if (flock)
2417 inarg->lk_flags |= FUSE_LK_FLOCK;
2418 args->opcode = opcode;
2419 args->nodeid = get_node_id(inode);
2420 args->in_numargs = 1;
2421 args->in_args[0].size = sizeof(*inarg);
2422 args->in_args[0].value = inarg;
2423 }
2424
2425 static int fuse_getlk(struct file *file, struct file_lock *fl)
2426 {
2427 struct inode *inode = file_inode(file);
2428 struct fuse_mount *fm = get_fuse_mount(inode);
2429 FUSE_ARGS(args);
2430 struct fuse_lk_in inarg;
2431 struct fuse_lk_out outarg;
2432 int err;
2433
2434 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
2435 args.out_numargs = 1;
2436 args.out_args[0].size = sizeof(outarg);
2437 args.out_args[0].value = &outarg;
2438 err = fuse_simple_request(fm, &args);
2439 if (!err)
2440 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
2441
2442 return err;
2443 }
2444
2445 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
2446 {
2447 struct inode *inode = file_inode(file);
2448 struct fuse_mount *fm = get_fuse_mount(inode);
2449 FUSE_ARGS(args);
2450 struct fuse_lk_in inarg;
2451 int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
2452 struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
2453 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
2454 int err;
2455
2456 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
2457 /* NLM needs asynchronous locks, which we don't support yet */
2458 return -ENOLCK;
2459 }
2460
2461 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
2462 err = fuse_simple_request(fm, &args);
2463
2464 /* locking is restartable */
2465 if (err == -EINTR)
2466 err = -ERESTARTSYS;
2467
2468 return err;
2469 }
2470
2471 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2472 {
2473 struct inode *inode = file_inode(file);
2474 struct fuse_conn *fc = get_fuse_conn(inode);
2475 int err;
2476
2477 if (cmd == F_CANCELLK) {
2478 err = 0;
2479 } else if (cmd == F_GETLK) {
2480 if (fc->no_lock) {
2481 posix_test_lock(file, fl);
2482 err = 0;
2483 } else
2484 err = fuse_getlk(file, fl);
2485 } else {
2486 if (fc->no_lock)
2487 err = posix_lock_file(file, fl, NULL);
2488 else
2489 err = fuse_setlk(file, fl, 0);
2490 }
2491 return err;
2492 }
2493
2494 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2495 {
2496 struct inode *inode = file_inode(file);
2497 struct fuse_conn *fc = get_fuse_conn(inode);
2498 int err;
2499
2500 if (fc->no_flock) {
2501 err = locks_lock_file_wait(file, fl);
2502 } else {
2503 struct fuse_file *ff = file->private_data;
2504
2505 /* emulate flock with POSIX locks */
2506 ff->flock = true;
2507 err = fuse_setlk(file, fl, 1);
2508 }
2509
2510 return err;
2511 }
2512
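/*
 * Map a logical file block to a device block via FUSE_BMAP (only meaningful
 * for block-device-backed filesystems).
 */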
2513 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2514 {
2515 struct inode *inode = mapping->host;
2516 struct fuse_mount *fm = get_fuse_mount(inode);
2517 FUSE_ARGS(args);
2518 struct fuse_bmap_in inarg;
2519 struct fuse_bmap_out outarg;
2520 int err;
2521
2522 if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
2523 return 0;
2524
2525 memset(&inarg, 0, sizeof(inarg));
2526 inarg.block = block;
2527 inarg.blocksize = inode->i_sb->s_blocksize;
2528 args.opcode = FUSE_BMAP;
2529 args.nodeid = get_node_id(inode);
2530 args.in_numargs = 1;
2531 args.in_args[0].size = sizeof(inarg);
2532 args.in_args[0].value = &inarg;
2533 args.out_numargs = 1;
2534 args.out_args[0].size = sizeof(outarg);
2535 args.out_args[0].value = &outarg;
2536 err = fuse_simple_request(fm, &args);
2537 if (err == -ENOSYS)
2538 fm->fc->no_bmap = 1;
2539
2540 return err ? 0 : outarg.block;
2541 }
2542
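/*
 * Resolve SEEK_DATA/SEEK_HOLE via FUSE_LSEEK; fall back to a size-based
 * generic llseek if the server does not implement it.
 */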
2543 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2544 {
2545 struct inode *inode = file->f_mapping->host;
2546 struct fuse_mount *fm = get_fuse_mount(inode);
2547 struct fuse_file *ff = file->private_data;
2548 FUSE_ARGS(args);
2549 struct fuse_lseek_in inarg = {
2550 .fh = ff->fh,
2551 .offset = offset,
2552 .whence = whence
2553 };
2554 struct fuse_lseek_out outarg;
2555 int err;
2556
2557 if (fm->fc->no_lseek)
2558 goto fallback;
2559
2560 args.opcode = FUSE_LSEEK;
2561 args.nodeid = ff->nodeid;
2562 args.in_numargs = 1;
2563 args.in_args[0].size = sizeof(inarg);
2564 args.in_args[0].value = &inarg;
2565 args.out_numargs = 1;
2566 args.out_args[0].size = sizeof(outarg);
2567 args.out_args[0].value = &outarg;
2568 err = fuse_simple_request(fm, &args);
2569 if (err) {
2570 if (err == -ENOSYS) {
2571 fm->fc->no_lseek = 1;
2572 goto fallback;
2573 }
2574 return err;
2575 }
2576
2577 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2578
2579 fallback:
2580 err = fuse_update_attributes(inode, file, STATX_SIZE);
2581 if (!err)
2582 return generic_file_llseek(file, offset, whence);
2583 else
2584 return err;
2585 }
2586
2587 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
2588 {
2589 loff_t retval;
2590 struct inode *inode = file_inode(file);
2591
2592 switch (whence) {
2593 case SEEK_SET:
2594 case SEEK_CUR:
2595 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
2596 retval = generic_file_llseek(file, offset, whence);
2597 break;
2598 case SEEK_END:
2599 inode_lock(inode);
2600 retval = fuse_update_attributes(inode, file, STATX_SIZE);
2601 if (!retval)
2602 retval = generic_file_llseek(file, offset, whence);
2603 inode_unlock(inode);
2604 break;
2605 case SEEK_HOLE:
2606 case SEEK_DATA:
2607 inode_lock(inode);
2608 retval = fuse_lseek(file, offset, whence);
2609 inode_unlock(inode);
2610 break;
2611 default:
2612 retval = -EINVAL;
2613 }
2614
2615 return retval;
2616 }
2617
2618 /*
2619 * All files which have been polled are linked to RB tree
2620 * fuse_conn->polled_files which is indexed by kh. Walk the tree and
2621 * find the matching one.
2622 */
2623 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2624 struct rb_node **parent_out)
2625 {
2626 struct rb_node **link = &fc->polled_files.rb_node;
2627 struct rb_node *last = NULL;
2628
2629 while (*link) {
2630 struct fuse_file *ff;
2631
2632 last = *link;
2633 ff = rb_entry(last, struct fuse_file, polled_node);
2634
2635 if (kh < ff->kh)
2636 link = &last->rb_left;
2637 else if (kh > ff->kh)
2638 link = &last->rb_right;
2639 else
2640 return link;
2641 }
2642
2643 if (parent_out)
2644 *parent_out = last;
2645 return link;
2646 }
2647
2648 /*
2649 * The file is about to be polled. Make sure it's on the polled_files
2650 * RB tree. Note that files once added to the polled_files tree are
2651 * not removed before the file is released. This is because a file
2652 * polled once is likely to be polled again.
2653 */
2654 static void fuse_register_polled_file(struct fuse_conn *fc,
2655 struct fuse_file *ff)
2656 {
2657 spin_lock(&fc->lock);
2658 if (RB_EMPTY_NODE(&ff->polled_node)) {
2659 struct rb_node **link, *parent;
2660
2661 link = fuse_find_polled_node(fc, ff->kh, &parent);
2662 BUG_ON(*link);
2663 rb_link_node(&ff->polled_node, parent, link);
2664 rb_insert_color(&ff->polled_node, &fc->polled_files);
2665 }
2666 spin_unlock(&fc->lock);
2667 }
2668
2669 __poll_t fuse_file_poll(struct file *file, poll_table *wait)
2670 {
2671 struct fuse_file *ff = file->private_data;
2672 struct fuse_mount *fm = ff->fm;
2673 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2674 struct fuse_poll_out outarg;
2675 FUSE_ARGS(args);
2676 int err;
2677
2678 if (fm->fc->no_poll)
2679 return DEFAULT_POLLMASK;
2680
2681 poll_wait(file, &ff->poll_wait, wait);
2682 inarg.events = mangle_poll(poll_requested_events(wait));
2683
2684 /*
2685 * Ask for notification iff there's someone waiting for it.
2686 * The client may ignore the flag and always notify.
2687 */
2688 if (waitqueue_active(&ff->poll_wait)) {
2689 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2690 fuse_register_polled_file(fm->fc, ff);
2691 }
2692
2693 args.opcode = FUSE_POLL;
2694 args.nodeid = ff->nodeid;
2695 args.in_numargs = 1;
2696 args.in_args[0].size = sizeof(inarg);
2697 args.in_args[0].value = &inarg;
2698 args.out_numargs = 1;
2699 args.out_args[0].size = sizeof(outarg);
2700 args.out_args[0].value = &outarg;
2701 err = fuse_simple_request(fm, &args);
2702
2703 if (!err)
2704 return demangle_poll(outarg.revents);
2705 if (err == -ENOSYS) {
2706 fm->fc->no_poll = 1;
2707 return DEFAULT_POLLMASK;
2708 }
2709 return EPOLLERR;
2710 }
2711 EXPORT_SYMBOL_GPL(fuse_file_poll);
2712
2713 /*
2714 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2715 * wakes up the poll waiters.
2716 */
2717 int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2718 struct fuse_notify_poll_wakeup_out *outarg)
2719 {
2720 u64 kh = outarg->kh;
2721 struct rb_node **link;
2722
2723 spin_lock(&fc->lock);
2724
2725 link = fuse_find_polled_node(fc, kh, NULL);
2726 if (*link) {
2727 struct fuse_file *ff;
2728
2729 ff = rb_entry(*link, struct fuse_file, polled_node);
2730 wake_up_interruptible_sync(&ff->poll_wait);
2731 }
2732
2733 spin_unlock(&fc->lock);
2734 return 0;
2735 }
2736
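/*
 * Send a setattr truncating the file to its current in-core size (used after
 * a failed extending direct write).
 */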
2737 static void fuse_do_truncate(struct file *file)
2738 {
2739 struct inode *inode = file->f_mapping->host;
2740 struct iattr attr;
2741
2742 attr.ia_valid = ATTR_SIZE;
2743 attr.ia_size = i_size_read(inode);
2744
2745 attr.ia_file = file;
2746 attr.ia_valid |= ATTR_FILE;
2747
2748 fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
2749 }
2750
2751 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
2752 {
2753 return round_up(off, fc->max_pages << PAGE_SHIFT);
2754 }
2755
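/*
 * ->direct_IO callback: perform O_DIRECT reads and writes through the FUSE
 * direct io path, asynchronously when the connection supports async_dio.
 */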
2756 static ssize_t
2757 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2758 {
2759 DECLARE_COMPLETION_ONSTACK(wait);
2760 ssize_t ret = 0;
2761 struct file *file = iocb->ki_filp;
2762 struct fuse_file *ff = file->private_data;
2763 loff_t pos = 0;
2764 struct inode *inode;
2765 loff_t i_size;
2766 size_t count = iov_iter_count(iter), shortened = 0;
2767 loff_t offset = iocb->ki_pos;
2768 struct fuse_io_priv *io;
2769
2770 pos = offset;
2771 inode = file->f_mapping->host;
2772 i_size = i_size_read(inode);
2773
2774 if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
2775 return 0;
2776
2777 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2778 if (!io)
2779 return -ENOMEM;
2780 spin_lock_init(&io->lock);
2781 kref_init(&io->refcnt);
2782 io->reqs = 1;
2783 io->bytes = -1;
2784 io->size = 0;
2785 io->offset = offset;
2786 io->write = (iov_iter_rw(iter) == WRITE);
2787 io->err = 0;
2788 /*
2789 * By default, we want to optimize all I/Os with async request
2790 * submission to the client filesystem if supported.
2791 */
2792 io->async = ff->fm->fc->async_dio;
2793 io->iocb = iocb;
2794 io->blocking = is_sync_kiocb(iocb);
2795
2796 /* optimization for short read */
2797 if (io->async && !io->write && offset + count > i_size) {
2798 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
2799 shortened = count - iov_iter_count(iter);
2800 count -= shortened;
2801 }
2802
2803 /*
2804 * We cannot asynchronously extend the size of a file.
2805 * In such a case the aio will behave exactly like sync io.
2806 */
2807 if ((offset + count > i_size) && io->write)
2808 io->blocking = true;
2809
2810 if (io->async && io->blocking) {
2811 /*
2812 * Additional reference to keep io around after
2813 * calling fuse_aio_complete()
2814 */
2815 kref_get(&io->refcnt);
2816 io->done = &wait;
2817 }
2818
2819 if (iov_iter_rw(iter) == WRITE) {
2820 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
2821 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
2822 } else {
2823 ret = __fuse_direct_read(io, iter, &pos);
2824 }
2825 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
2826
2827 if (io->async) {
2828 bool blocking = io->blocking;
2829
2830 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2831
2832 /* we have a non-extending, async request, so return */
2833 if (!blocking)
2834 return -EIOCBQUEUED;
2835
2836 wait_for_completion(&wait);
2837 ret = fuse_get_res_by_io(io);
2838 }
2839
2840 kref_put(&io->refcnt, fuse_io_release);
2841
2842 if (iov_iter_rw(iter) == WRITE) {
2843 fuse_write_update_attr(inode, pos, ret);
2844 /* For extending writes we already hold exclusive lock */
2845 if (ret < 0 && offset + count > i_size)
2846 fuse_do_truncate(file);
2847 }
2848
2849 return ret;
2850 }
2851
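/*
 * Write back and wait on dirty pages from @start onwards, then wait for
 * in-flight FUSE writes to complete.
 */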
2852 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
2853 {
2854 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
2855
2856 if (!err)
2857 fuse_sync_writes(inode);
2858
2859 return err;
2860 }
2861
2862 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2863 loff_t length)
2864 {
2865 struct fuse_file *ff = file->private_data;
2866 struct inode *inode = file_inode(file);
2867 struct fuse_inode *fi = get_fuse_inode(inode);
2868 struct fuse_mount *fm = ff->fm;
2869 FUSE_ARGS(args);
2870 struct fuse_fallocate_in inarg = {
2871 .fh = ff->fh,
2872 .offset = offset,
2873 .length = length,
2874 .mode = mode
2875 };
2876 int err;
2877 bool block_faults = FUSE_IS_DAX(inode) &&
2878 (!(mode & FALLOC_FL_KEEP_SIZE) ||
2879 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
2880
2881 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
2882 FALLOC_FL_ZERO_RANGE))
2883 return -EOPNOTSUPP;
2884
2885 if (fm->fc->no_fallocate)
2886 return -EOPNOTSUPP;
2887
2888 inode_lock(inode);
2889 if (block_faults) {
2890 filemap_invalidate_lock(inode->i_mapping);
2891 err = fuse_dax_break_layouts(inode, 0, -1);
2892 if (err)
2893 goto out;
2894 }
2895
2896 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
2897 loff_t endbyte = offset + length - 1;
2898
2899 err = fuse_writeback_range(inode, offset, endbyte);
2900 if (err)
2901 goto out;
2902 }
2903
2904 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2905 offset + length > i_size_read(inode)) {
2906 err = inode_newsize_ok(inode, offset + length);
2907 if (err)
2908 goto out;
2909 }
2910
2911 err = file_modified(file);
2912 if (err)
2913 goto out;
2914
2915 if (!(mode & FALLOC_FL_KEEP_SIZE))
2916 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2917
2918 args.opcode = FUSE_FALLOCATE;
2919 args.nodeid = ff->nodeid;
2920 args.in_numargs = 1;
2921 args.in_args[0].size = sizeof(inarg);
2922 args.in_args[0].value = &inarg;
2923 err = fuse_simple_request(fm, &args);
2924 if (err == -ENOSYS) {
2925 fm->fc->no_fallocate = 1;
2926 err = -EOPNOTSUPP;
2927 }
2928 if (err)
2929 goto out;
2930
2931 /* we could have extended the file */
2932 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
2933 if (fuse_write_update_attr(inode, offset + length, length))
2934 file_update_time(file);
2935 }
2936
2937 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
2938 truncate_pagecache_range(inode, offset, offset + length - 1);
2939
2940 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
2941
2942 out:
2943 if (!(mode & FALLOC_FL_KEEP_SIZE))
2944 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2945
2946 if (block_faults)
2947 filemap_invalidate_unlock(inode->i_mapping);
2948
2949 inode_unlock(inode);
2950
2951 fuse_flush_time_update(inode);
2952
2953 return err;
2954 }
2955
2956 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
2957 struct file *file_out, loff_t pos_out,
2958 size_t len, unsigned int flags)
2959 {
2960 struct fuse_file *ff_in = file_in->private_data;
2961 struct fuse_file *ff_out = file_out->private_data;
2962 struct inode *inode_in = file_inode(file_in);
2963 struct inode *inode_out = file_inode(file_out);
2964 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
2965 struct fuse_mount *fm = ff_in->fm;
2966 struct fuse_conn *fc = fm->fc;
2967 FUSE_ARGS(args);
2968 struct fuse_copy_file_range_in inarg = {
2969 .fh_in = ff_in->fh,
2970 .off_in = pos_in,
2971 .nodeid_out = ff_out->nodeid,
2972 .fh_out = ff_out->fh,
2973 .off_out = pos_out,
2974 .len = len,
2975 .flags = flags
2976 };
2977 struct fuse_write_out outarg;
2978 ssize_t err;
2979 /* mark unstable when write-back is not used, and file_out gets
2980 * extended */
2981 bool is_unstable = (!fc->writeback_cache) &&
2982 ((pos_out + len) > inode_out->i_size);
2983
2984 if (fc->no_copy_file_range)
2985 return -EOPNOTSUPP;
2986
2987 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
2988 return -EXDEV;
2989
2990 inode_lock(inode_in);
2991 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
2992 inode_unlock(inode_in);
2993 if (err)
2994 return err;
2995
2996 inode_lock(inode_out);
2997
2998 err = file_modified(file_out);
2999 if (err)
3000 goto out;
3001
3002 /*
3003 * Write out dirty pages in the destination file before sending the COPY
3004 * request to userspace. After the request is completed, truncate off
3005 * pages (including partial ones) from the cache that have been copied,
3006 * since these contain stale data at that point.
3007 *
3008 * This should be mostly correct, but if the COPY writes to partial
3009 * pages (at the start or end) and the parts not covered by the COPY are
3010 * written through a memory map after calling fuse_writeback_range(),
3011 * then these partial page modifications will be lost on truncation.
3012 *
3013 * It is unlikely that someone would rely on such mixed style
3014 * modifications. Yet this does give less guarantees than if the
3015 * copying was performed with write(2).
3016 *
3017 * To fix this a mapping->invalidate_lock could be used to prevent new
3018 * faults while the copy is ongoing.
3019 */
3020 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3021 if (err)
3022 goto out;
3023
3024 if (is_unstable)
3025 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3026
3027 args.opcode = FUSE_COPY_FILE_RANGE;
3028 args.nodeid = ff_in->nodeid;
3029 args.in_numargs = 1;
3030 args.in_args[0].size = sizeof(inarg);
3031 args.in_args[0].value = &inarg;
3032 args.out_numargs = 1;
3033 args.out_args[0].size = sizeof(outarg);
3034 args.out_args[0].value = &outarg;
3035 err = fuse_simple_request(fm, &args);
3036 if (err == -ENOSYS) {
3037 fc->no_copy_file_range = 1;
3038 err = -EOPNOTSUPP;
3039 }
3040 if (err)
3041 goto out;
3042
3043 truncate_inode_pages_range(inode_out->i_mapping,
3044 ALIGN_DOWN(pos_out, PAGE_SIZE),
3045 ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
3046
3047 file_update_time(file_out);
3048 fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size);
3049
3050 err = outarg.size;
3051 out:
3052 if (is_unstable)
3053 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3054
3055 inode_unlock(inode_out);
3056 file_accessed(file_in);
3057
3058 fuse_flush_time_update(inode_out);
3059
3060 return err;
3061 }
3062
3063 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3064 struct file *dst_file, loff_t dst_off,
3065 size_t len, unsigned int flags)
3066 {
3067 ssize_t ret;
3068
3069 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3070 len, flags);
3071
3072 if (ret == -EOPNOTSUPP || ret == -EXDEV)
3073 ret = splice_copy_file_range(src_file, src_off, dst_file,
3074 dst_off, len);
3075 return ret;
3076 }
3077
3078 static const struct file_operations fuse_file_operations = {
3079 .llseek = fuse_file_llseek,
3080 .read_iter = fuse_file_read_iter,
3081 .write_iter = fuse_file_write_iter,
3082 .mmap = fuse_file_mmap,
3083 .open = fuse_open,
3084 .flush = fuse_flush,
3085 .release = fuse_release,
3086 .fsync = fuse_fsync,
3087 .lock = fuse_file_lock,
3088 .get_unmapped_area = thp_get_unmapped_area,
3089 .flock = fuse_file_flock,
3090 .splice_read = fuse_splice_read,
3091 .splice_write = fuse_splice_write,
3092 .unlocked_ioctl = fuse_file_ioctl,
3093 .compat_ioctl = fuse_file_compat_ioctl,
3094 .poll = fuse_file_poll,
3095 .fallocate = fuse_file_fallocate,
3096 .copy_file_range = fuse_copy_file_range,
3097 };
3098
3099 static const struct address_space_operations fuse_file_aops = {
3100 .read_folio = fuse_read_folio,
3101 .readahead = fuse_readahead,
3102 .writepages = fuse_writepages,
3103 .launder_folio = fuse_launder_folio,
3104 .dirty_folio = iomap_dirty_folio,
3105 .release_folio = iomap_release_folio,
3106 .invalidate_folio = iomap_invalidate_folio,
3107 .is_partially_uptodate = iomap_is_partially_uptodate,
3108 .migrate_folio = filemap_migrate_folio,
3109 .bmap = fuse_bmap,
3110 .direct_IO = fuse_direct_IO,
3111 };
3112
3113 void fuse_init_file_inode(struct inode *inode, unsigned int flags)
3114 {
3115 struct fuse_inode *fi = get_fuse_inode(inode);
3116 struct fuse_conn *fc = get_fuse_conn(inode);
3117
3118 inode->i_fop = &fuse_file_operations;
3119 inode->i_data.a_ops = &fuse_file_aops;
3120 if (fc->writeback_cache)
3121 mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data);
3122
3123 INIT_LIST_HEAD(&fi->write_files);
3124 INIT_LIST_HEAD(&fi->queued_writes);
3125 fi->writectr = 0;
3126 fi->iocachectr = 0;
3127 init_waitqueue_head(&fi->page_waitq);
3128 init_waitqueue_head(&fi->direct_io_waitq);
3129
3130 if (IS_ENABLED(CONFIG_FUSE_DAX))
3131 fuse_dax_inode_init(inode, flags);
3132 }
3133