1 /* 2 FUSE: Filesystem in Userspace 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> 4 5 This program can be distributed under the terms of the GNU GPL. 6 See the file COPYING. 7 */ 8 9 #include "fuse_i.h" 10 11 #include <linux/pagemap.h> 12 #include <linux/slab.h> 13 #include <linux/kernel.h> 14 #include <linux/sched.h> 15 #include <linux/sched/signal.h> 16 #include <linux/module.h> 17 #include <linux/swap.h> 18 #include <linux/falloc.h> 19 #include <linux/uio.h> 20 #include <linux/fs.h> 21 #include <linux/filelock.h> 22 #include <linux/splice.h> 23 #include <linux/task_io_accounting_ops.h> 24 #include <linux/iomap.h> 25 26 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, 27 unsigned int open_flags, int opcode, 28 struct fuse_open_out *outargp) 29 { 30 struct fuse_open_in inarg; 31 FUSE_ARGS(args); 32 33 memset(&inarg, 0, sizeof(inarg)); 34 inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); 35 if (!fm->fc->atomic_o_trunc) 36 inarg.flags &= ~O_TRUNC; 37 38 if (fm->fc->handle_killpriv_v2 && 39 (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) { 40 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID; 41 } 42 43 args.opcode = opcode; 44 args.nodeid = nodeid; 45 args.in_numargs = 1; 46 args.in_args[0].size = sizeof(inarg); 47 args.in_args[0].value = &inarg; 48 args.out_numargs = 1; 49 args.out_args[0].size = sizeof(*outargp); 50 args.out_args[0].value = outargp; 51 52 return fuse_simple_request(fm, &args); 53 } 54 55 struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) 56 { 57 struct fuse_file *ff; 58 59 ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT); 60 if (unlikely(!ff)) 61 return NULL; 62 63 ff->fm = fm; 64 if (release) { 65 ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT); 66 if (!ff->args) { 67 kfree(ff); 68 return NULL; 69 } 70 } 71 72 INIT_LIST_HEAD(&ff->write_entry); 73 refcount_set(&ff->count, 1); 74 RB_CLEAR_NODE(&ff->polled_node); 75 init_waitqueue_head(&ff->poll_wait); 76 77 ff->kh = atomic64_inc_return(&fm->fc->khctr); 78 79 return ff; 80 } 81 82 void fuse_file_free(struct fuse_file *ff) 83 { 84 kfree(ff->args); 85 kfree(ff); 86 } 87 88 static struct fuse_file *fuse_file_get(struct fuse_file *ff) 89 { 90 refcount_inc(&ff->count); 91 return ff; 92 } 93 94 static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, 95 int error) 96 { 97 struct fuse_release_args *ra = container_of(args, typeof(*ra), args); 98 99 iput(ra->inode); 100 kfree(ra); 101 } 102 103 static void fuse_file_put(struct fuse_file *ff, bool sync) 104 { 105 if (refcount_dec_and_test(&ff->count)) { 106 struct fuse_release_args *ra = &ff->args->release_args; 107 struct fuse_args *args = (ra ? &ra->args : NULL); 108 109 if (ra && ra->inode) 110 fuse_file_io_release(ff, ra->inode); 111 112 if (!args) { 113 /* Do nothing when server does not implement 'open' */ 114 } else if (sync) { 115 fuse_simple_request(ff->fm, args); 116 fuse_release_end(ff->fm, args, 0); 117 } else { 118 args->end = fuse_release_end; 119 if (fuse_simple_background(ff->fm, args, 120 GFP_KERNEL | __GFP_NOFAIL)) 121 fuse_release_end(ff->fm, args, -ENOTCONN); 122 } 123 kfree(ff); 124 } 125 } 126 127 struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, 128 unsigned int open_flags, bool isdir) 129 { 130 struct fuse_conn *fc = fm->fc; 131 struct fuse_file *ff; 132 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 133 bool open = isdir ? !fc->no_opendir : !fc->no_open; 134 135 ff = fuse_file_alloc(fm, open); 136 if (!ff) 137 return ERR_PTR(-ENOMEM); 138 139 ff->fh = 0; 140 /* Default for no-open */ 141 ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); 142 if (open) { 143 /* Store outarg for fuse_finish_open() */ 144 struct fuse_open_out *outargp = &ff->args->open_outarg; 145 int err; 146 147 err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp); 148 if (!err) { 149 ff->fh = outargp->fh; 150 ff->open_flags = outargp->open_flags; 151 } else if (err != -ENOSYS) { 152 fuse_file_free(ff); 153 return ERR_PTR(err); 154 } else { 155 /* No release needed */ 156 kfree(ff->args); 157 ff->args = NULL; 158 if (isdir) 159 fc->no_opendir = 1; 160 else 161 fc->no_open = 1; 162 } 163 } 164 165 if (isdir) 166 ff->open_flags &= ~FOPEN_DIRECT_IO; 167 168 ff->nodeid = nodeid; 169 170 return ff; 171 } 172 173 int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file, 174 bool isdir) 175 { 176 struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir); 177 178 if (!IS_ERR(ff)) 179 file->private_data = ff; 180 181 return PTR_ERR_OR_ZERO(ff); 182 } 183 EXPORT_SYMBOL_GPL(fuse_do_open); 184 185 static void fuse_link_write_file(struct file *file) 186 { 187 struct inode *inode = file_inode(file); 188 struct fuse_inode *fi = get_fuse_inode(inode); 189 struct fuse_file *ff = file->private_data; 190 /* 191 * file may be written through mmap, so chain it onto the 192 * inodes's write_file list 193 */ 194 spin_lock(&fi->lock); 195 if (list_empty(&ff->write_entry)) 196 list_add(&ff->write_entry, &fi->write_files); 197 spin_unlock(&fi->lock); 198 } 199 200 int fuse_finish_open(struct inode *inode, struct file *file) 201 { 202 struct fuse_file *ff = file->private_data; 203 struct fuse_conn *fc = get_fuse_conn(inode); 204 int err; 205 206 err = fuse_file_io_open(file, inode); 207 if (err) 208 return err; 209 210 if (ff->open_flags & FOPEN_STREAM) 211 stream_open(inode, file); 212 else if (ff->open_flags & FOPEN_NONSEEKABLE) 213 nonseekable_open(inode, file); 214 215 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) 216 fuse_link_write_file(file); 217 218 return 0; 219 } 220 221 static void fuse_truncate_update_attr(struct inode *inode, struct file *file) 222 { 223 struct fuse_conn *fc = get_fuse_conn(inode); 224 struct fuse_inode *fi = get_fuse_inode(inode); 225 226 spin_lock(&fi->lock); 227 fi->attr_version = atomic64_inc_return(&fc->attr_version); 228 i_size_write(inode, 0); 229 spin_unlock(&fi->lock); 230 file_update_time(file); 231 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 232 } 233 234 static int fuse_open(struct inode *inode, struct file *file) 235 { 236 struct fuse_mount *fm = get_fuse_mount(inode); 237 struct fuse_inode *fi = get_fuse_inode(inode); 238 struct fuse_conn *fc = fm->fc; 239 struct fuse_file *ff; 240 int err; 241 bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; 242 bool is_wb_truncate = is_truncate && fc->writeback_cache; 243 bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); 244 245 if (fuse_is_bad(inode)) 246 return -EIO; 247 248 err = generic_file_open(inode, file); 249 if (err) 250 return err; 251 252 if (is_wb_truncate || dax_truncate) 253 inode_lock(inode); 254 255 if (dax_truncate) { 256 filemap_invalidate_lock(inode->i_mapping); 257 err = fuse_dax_break_layouts(inode, 0, -1); 258 if (err) 259 goto out_inode_unlock; 260 } 261 262 if (is_wb_truncate || dax_truncate) 263 fuse_set_nowrite(inode); 264 265 err = fuse_do_open(fm, get_node_id(inode), file, false); 266 if (!err) { 267 ff = file->private_data; 268 err = fuse_finish_open(inode, file); 269 if (err) 270 fuse_sync_release(fi, ff, file->f_flags); 271 else if (is_truncate) 272 fuse_truncate_update_attr(inode, file); 273 } 274 275 if (is_wb_truncate || dax_truncate) 276 fuse_release_nowrite(inode); 277 if (!err) { 278 if (is_truncate) 279 truncate_pagecache(inode, 0); 280 else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) 281 invalidate_inode_pages2(inode->i_mapping); 282 } 283 if (dax_truncate) 284 filemap_invalidate_unlock(inode->i_mapping); 285 out_inode_unlock: 286 if (is_wb_truncate || dax_truncate) 287 inode_unlock(inode); 288 289 return err; 290 } 291 292 static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, 293 unsigned int flags, int opcode, bool sync) 294 { 295 struct fuse_conn *fc = ff->fm->fc; 296 struct fuse_release_args *ra = &ff->args->release_args; 297 298 if (fuse_file_passthrough(ff)) 299 fuse_passthrough_release(ff, fuse_inode_backing(fi)); 300 301 /* Inode is NULL on error path of fuse_create_open() */ 302 if (likely(fi)) { 303 spin_lock(&fi->lock); 304 list_del(&ff->write_entry); 305 spin_unlock(&fi->lock); 306 } 307 spin_lock(&fc->lock); 308 if (!RB_EMPTY_NODE(&ff->polled_node)) 309 rb_erase(&ff->polled_node, &fc->polled_files); 310 spin_unlock(&fc->lock); 311 312 wake_up_interruptible_all(&ff->poll_wait); 313 314 if (!ra) 315 return; 316 317 /* ff->args was used for open outarg */ 318 memset(ff->args, 0, sizeof(*ff->args)); 319 ra->inarg.fh = ff->fh; 320 ra->inarg.flags = flags; 321 ra->args.in_numargs = 1; 322 ra->args.in_args[0].size = sizeof(struct fuse_release_in); 323 ra->args.in_args[0].value = &ra->inarg; 324 ra->args.opcode = opcode; 325 ra->args.nodeid = ff->nodeid; 326 ra->args.force = true; 327 ra->args.nocreds = true; 328 329 /* 330 * Hold inode until release is finished. 331 * From fuse_sync_release() the refcount is 1 and everything's 332 * synchronous, so we are fine with not doing igrab() here. 333 */ 334 ra->inode = sync ? NULL : igrab(&fi->inode); 335 } 336 337 void fuse_file_release(struct inode *inode, struct fuse_file *ff, 338 unsigned int open_flags, fl_owner_t id, bool isdir) 339 { 340 struct fuse_inode *fi = get_fuse_inode(inode); 341 struct fuse_release_args *ra = &ff->args->release_args; 342 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; 343 344 fuse_prepare_release(fi, ff, open_flags, opcode, false); 345 346 if (ra && ff->flock) { 347 ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; 348 ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); 349 } 350 351 /* 352 * Normally this will send the RELEASE request, however if 353 * some asynchronous READ or WRITE requests are outstanding, 354 * the sending will be delayed. 355 * 356 * Make the release synchronous if this is a fuseblk mount, 357 * synchronous RELEASE is allowed (and desirable) in this case 358 * because the server can be trusted not to screw up. 359 * 360 * Always use the asynchronous file put because the current thread 361 * might be the fuse server. This can happen if a process starts some 362 * aio and closes the fd before the aio completes. Since aio takes its 363 * own ref to the file, the IO completion has to drop the ref, which is 364 * how the fuse server can end up closing its clients' files. 365 */ 366 fuse_file_put(ff, false); 367 } 368 369 void fuse_release_common(struct file *file, bool isdir) 370 { 371 fuse_file_release(file_inode(file), file->private_data, file->f_flags, 372 (fl_owner_t) file, isdir); 373 } 374 375 static int fuse_release(struct inode *inode, struct file *file) 376 { 377 struct fuse_conn *fc = get_fuse_conn(inode); 378 379 /* 380 * Dirty pages might remain despite write_inode_now() call from 381 * fuse_flush() due to writes racing with the close. 382 */ 383 if (fc->writeback_cache) 384 write_inode_now(inode, 1); 385 386 fuse_release_common(file, false); 387 388 /* return value is ignored by VFS */ 389 return 0; 390 } 391 392 void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, 393 unsigned int flags) 394 { 395 WARN_ON(refcount_read(&ff->count) > 1); 396 fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); 397 fuse_file_put(ff, true); 398 } 399 EXPORT_SYMBOL_GPL(fuse_sync_release); 400 401 /* 402 * Scramble the ID space with XTEA, so that the value of the files_struct 403 * pointer is not exposed to userspace. 404 */ 405 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) 406 { 407 u32 *k = fc->scramble_key; 408 u64 v = (unsigned long) id; 409 u32 v0 = v; 410 u32 v1 = v >> 32; 411 u32 sum = 0; 412 int i; 413 414 for (i = 0; i < 32; i++) { 415 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); 416 sum += 0x9E3779B9; 417 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); 418 } 419 420 return (u64) v0 + ((u64) v1 << 32); 421 } 422 423 struct fuse_writepage_args { 424 struct fuse_io_args ia; 425 struct list_head queue_entry; 426 struct inode *inode; 427 struct fuse_sync_bucket *bucket; 428 }; 429 430 /* 431 * Wait for all pending writepages on the inode to finish. 432 * 433 * This is currently done by blocking further writes with FUSE_NOWRITE 434 * and waiting for all sent writes to complete. 435 * 436 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage 437 * could conflict with truncation. 438 */ 439 static void fuse_sync_writes(struct inode *inode) 440 { 441 fuse_set_nowrite(inode); 442 fuse_release_nowrite(inode); 443 } 444 445 static int fuse_flush(struct file *file, fl_owner_t id) 446 { 447 struct inode *inode = file_inode(file); 448 struct fuse_mount *fm = get_fuse_mount(inode); 449 struct fuse_file *ff = file->private_data; 450 struct fuse_flush_in inarg; 451 FUSE_ARGS(args); 452 int err; 453 454 if (fuse_is_bad(inode)) 455 return -EIO; 456 457 if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache) 458 return 0; 459 460 err = write_inode_now(inode, 1); 461 if (err) 462 return err; 463 464 err = filemap_check_errors(file->f_mapping); 465 if (err) 466 return err; 467 468 err = 0; 469 if (fm->fc->no_flush) 470 goto inval_attr_out; 471 472 memset(&inarg, 0, sizeof(inarg)); 473 inarg.fh = ff->fh; 474 inarg.lock_owner = fuse_lock_owner_id(fm->fc, id); 475 args.opcode = FUSE_FLUSH; 476 args.nodeid = get_node_id(inode); 477 args.in_numargs = 1; 478 args.in_args[0].size = sizeof(inarg); 479 args.in_args[0].value = &inarg; 480 args.force = true; 481 482 err = fuse_simple_request(fm, &args); 483 if (err == -ENOSYS) { 484 fm->fc->no_flush = 1; 485 err = 0; 486 } 487 488 inval_attr_out: 489 /* 490 * In memory i_blocks is not maintained by fuse, if writeback cache is 491 * enabled, i_blocks from cached attr may not be accurate. 492 */ 493 if (!err && fm->fc->writeback_cache) 494 fuse_invalidate_attr_mask(inode, STATX_BLOCKS); 495 return err; 496 } 497 498 int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 499 int datasync, int opcode) 500 { 501 struct inode *inode = file->f_mapping->host; 502 struct fuse_mount *fm = get_fuse_mount(inode); 503 struct fuse_file *ff = file->private_data; 504 FUSE_ARGS(args); 505 struct fuse_fsync_in inarg; 506 507 memset(&inarg, 0, sizeof(inarg)); 508 inarg.fh = ff->fh; 509 inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0; 510 args.opcode = opcode; 511 args.nodeid = get_node_id(inode); 512 args.in_numargs = 1; 513 args.in_args[0].size = sizeof(inarg); 514 args.in_args[0].value = &inarg; 515 return fuse_simple_request(fm, &args); 516 } 517 518 static int fuse_fsync(struct file *file, loff_t start, loff_t end, 519 int datasync) 520 { 521 struct inode *inode = file->f_mapping->host; 522 struct fuse_conn *fc = get_fuse_conn(inode); 523 int err; 524 525 if (fuse_is_bad(inode)) 526 return -EIO; 527 528 inode_lock(inode); 529 530 /* 531 * Start writeback against all dirty pages of the inode, then 532 * wait for all outstanding writes, before sending the FSYNC 533 * request. 534 */ 535 err = file_write_and_wait_range(file, start, end); 536 if (err) 537 goto out; 538 539 fuse_sync_writes(inode); 540 541 /* 542 * Due to implementation of fuse writeback 543 * file_write_and_wait_range() does not catch errors. 544 * We have to do this directly after fuse_sync_writes() 545 */ 546 err = file_check_and_advance_wb_err(file); 547 if (err) 548 goto out; 549 550 err = sync_inode_metadata(inode, 1); 551 if (err) 552 goto out; 553 554 if (fc->no_fsync) 555 goto out; 556 557 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC); 558 if (err == -ENOSYS) { 559 fc->no_fsync = 1; 560 err = 0; 561 } 562 out: 563 inode_unlock(inode); 564 565 return err; 566 } 567 568 void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, 569 size_t count, int opcode) 570 { 571 struct fuse_file *ff = file->private_data; 572 struct fuse_args *args = &ia->ap.args; 573 574 ia->read.in.fh = ff->fh; 575 ia->read.in.offset = pos; 576 ia->read.in.size = count; 577 ia->read.in.flags = file->f_flags; 578 args->opcode = opcode; 579 args->nodeid = ff->nodeid; 580 args->in_numargs = 1; 581 args->in_args[0].size = sizeof(ia->read.in); 582 args->in_args[0].value = &ia->read.in; 583 args->out_argvar = true; 584 args->out_numargs = 1; 585 args->out_args[0].size = count; 586 } 587 588 static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres, 589 bool should_dirty) 590 { 591 unsigned int i; 592 593 for (i = 0; i < ap->num_folios; i++) { 594 if (should_dirty) 595 folio_mark_dirty_lock(ap->folios[i]); 596 if (ap->args.is_pinned) 597 unpin_folio(ap->folios[i]); 598 } 599 600 if (nres > 0 && ap->args.invalidate_vmap) 601 invalidate_kernel_vmap_range(ap->args.vmap_base, nres); 602 } 603 604 static void fuse_io_release(struct kref *kref) 605 { 606 kfree(container_of(kref, struct fuse_io_priv, refcnt)); 607 } 608 609 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io) 610 { 611 if (io->err) 612 return io->err; 613 614 if (io->bytes >= 0 && io->write) 615 return -EIO; 616 617 return io->bytes < 0 ? io->size : io->bytes; 618 } 619 620 /* 621 * In case of short read, the caller sets 'pos' to the position of 622 * actual end of fuse request in IO request. Otherwise, if bytes_requested 623 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1. 624 * 625 * An example: 626 * User requested DIO read of 64K. It was split into two 32K fuse requests, 627 * both submitted asynchronously. The first of them was ACKed by userspace as 628 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The 629 * second request was ACKed as short, e.g. only 1K was read, resulting in 630 * pos == 33K. 631 * 632 * Thus, when all fuse requests are completed, the minimal non-negative 'pos' 633 * will be equal to the length of the longest contiguous fragment of 634 * transferred data starting from the beginning of IO request. 635 */ 636 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) 637 { 638 int left; 639 640 spin_lock(&io->lock); 641 if (err) 642 io->err = io->err ? : err; 643 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes)) 644 io->bytes = pos; 645 646 left = --io->reqs; 647 if (!left && io->blocking) 648 complete(io->done); 649 spin_unlock(&io->lock); 650 651 if (!left && !io->blocking) { 652 ssize_t res = fuse_get_res_by_io(io); 653 654 if (res >= 0) { 655 struct inode *inode = file_inode(io->iocb->ki_filp); 656 struct fuse_conn *fc = get_fuse_conn(inode); 657 struct fuse_inode *fi = get_fuse_inode(inode); 658 659 spin_lock(&fi->lock); 660 fi->attr_version = atomic64_inc_return(&fc->attr_version); 661 spin_unlock(&fi->lock); 662 } 663 664 io->iocb->ki_complete(io->iocb, res); 665 } 666 667 kref_put(&io->refcnt, fuse_io_release); 668 } 669 670 static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, 671 unsigned int nfolios) 672 { 673 struct fuse_io_args *ia; 674 675 ia = kzalloc(sizeof(*ia), GFP_KERNEL); 676 if (ia) { 677 ia->io = io; 678 ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, 679 &ia->ap.descs); 680 if (!ia->ap.folios) { 681 kfree(ia); 682 ia = NULL; 683 } 684 } 685 return ia; 686 } 687 688 static void fuse_io_free(struct fuse_io_args *ia) 689 { 690 kfree(ia->ap.folios); 691 kfree(ia); 692 } 693 694 static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, 695 int err) 696 { 697 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); 698 struct fuse_io_priv *io = ia->io; 699 ssize_t pos = -1; 700 size_t nres; 701 702 if (err) { 703 /* Nothing */ 704 } else if (io->write) { 705 if (ia->write.out.size > ia->write.in.size) { 706 err = -EIO; 707 } else { 708 nres = ia->write.out.size; 709 if (ia->write.in.size != ia->write.out.size) 710 pos = ia->write.in.offset - io->offset + 711 ia->write.out.size; 712 } 713 } else { 714 u32 outsize = args->out_args[0].size; 715 716 nres = outsize; 717 if (ia->read.in.size != outsize) 718 pos = ia->read.in.offset - io->offset + outsize; 719 } 720 721 fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty); 722 723 fuse_aio_complete(io, err, pos); 724 fuse_io_free(ia); 725 } 726 727 static ssize_t fuse_async_req_send(struct fuse_mount *fm, 728 struct fuse_io_args *ia, size_t num_bytes) 729 { 730 ssize_t err; 731 struct fuse_io_priv *io = ia->io; 732 733 spin_lock(&io->lock); 734 kref_get(&io->refcnt); 735 io->size += num_bytes; 736 io->reqs++; 737 spin_unlock(&io->lock); 738 739 ia->ap.args.end = fuse_aio_complete_req; 740 ia->ap.args.may_block = io->should_dirty; 741 err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL); 742 if (err) 743 fuse_aio_complete_req(fm, &ia->ap.args, err); 744 745 return num_bytes; 746 } 747 748 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count, 749 fl_owner_t owner) 750 { 751 struct file *file = ia->io->iocb->ki_filp; 752 struct fuse_file *ff = file->private_data; 753 struct fuse_mount *fm = ff->fm; 754 755 fuse_read_args_fill(ia, file, pos, count, FUSE_READ); 756 if (owner != NULL) { 757 ia->read.in.read_flags |= FUSE_READ_LOCKOWNER; 758 ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner); 759 } 760 761 if (ia->io->async) 762 return fuse_async_req_send(fm, ia, count); 763 764 return fuse_simple_request(fm, &ia->ap.args); 765 } 766 767 static void fuse_read_update_size(struct inode *inode, loff_t size, 768 u64 attr_ver) 769 { 770 struct fuse_conn *fc = get_fuse_conn(inode); 771 struct fuse_inode *fi = get_fuse_inode(inode); 772 773 spin_lock(&fi->lock); 774 if (attr_ver >= fi->attr_version && size < inode->i_size && 775 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { 776 fi->attr_version = atomic64_inc_return(&fc->attr_version); 777 i_size_write(inode, size); 778 } 779 spin_unlock(&fi->lock); 780 } 781 782 static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, 783 struct fuse_args_pages *ap) 784 { 785 struct fuse_conn *fc = get_fuse_conn(inode); 786 787 /* 788 * If writeback_cache is enabled, a short read means there's a hole in 789 * the file. Some data after the hole is in page cache, but has not 790 * reached the client fs yet. So the hole is not present there. 791 */ 792 if (!fc->writeback_cache) { 793 loff_t pos = folio_pos(ap->folios[0]) + num_read; 794 fuse_read_update_size(inode, pos, attr_ver); 795 } 796 } 797 798 static int fuse_do_readfolio(struct file *file, struct folio *folio, 799 size_t off, size_t len) 800 { 801 struct inode *inode = folio->mapping->host; 802 struct fuse_mount *fm = get_fuse_mount(inode); 803 loff_t pos = folio_pos(folio) + off; 804 struct fuse_folio_desc desc = { 805 .offset = off, 806 .length = len, 807 }; 808 struct fuse_io_args ia = { 809 .ap.args.page_zeroing = true, 810 .ap.args.out_pages = true, 811 .ap.num_folios = 1, 812 .ap.folios = &folio, 813 .ap.descs = &desc, 814 }; 815 ssize_t res; 816 u64 attr_ver; 817 818 attr_ver = fuse_get_attr_version(fm->fc); 819 820 /* Don't overflow end offset */ 821 if (pos + (desc.length - 1) == LLONG_MAX) 822 desc.length--; 823 824 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ); 825 res = fuse_simple_request(fm, &ia.ap.args); 826 if (res < 0) 827 return res; 828 /* 829 * Short read means EOF. If file size is larger, truncate it 830 */ 831 if (res < desc.length) 832 fuse_short_read(inode, attr_ver, res, &ia.ap); 833 834 return 0; 835 } 836 837 static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 838 unsigned int flags, struct iomap *iomap, 839 struct iomap *srcmap) 840 { 841 iomap->type = IOMAP_MAPPED; 842 iomap->length = length; 843 iomap->offset = offset; 844 return 0; 845 } 846 847 static const struct iomap_ops fuse_iomap_ops = { 848 .iomap_begin = fuse_iomap_begin, 849 }; 850 851 struct fuse_fill_read_data { 852 struct file *file; 853 854 /* Fields below are used if sending the read request asynchronously */ 855 struct fuse_conn *fc; 856 struct fuse_io_args *ia; 857 unsigned int nr_bytes; 858 }; 859 860 /* forward declarations */ 861 static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos, 862 unsigned len, struct fuse_args_pages *ap, 863 unsigned cur_bytes, bool write); 864 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file, 865 unsigned int count, bool async); 866 867 static int fuse_handle_readahead(struct folio *folio, 868 struct readahead_control *rac, 869 struct fuse_fill_read_data *data, loff_t pos, 870 size_t len) 871 { 872 struct fuse_io_args *ia = data->ia; 873 size_t off = offset_in_folio(folio, pos); 874 struct fuse_conn *fc = data->fc; 875 struct fuse_args_pages *ap; 876 unsigned int nr_pages; 877 878 if (ia && fuse_folios_need_send(fc, pos, len, &ia->ap, data->nr_bytes, 879 false)) { 880 fuse_send_readpages(ia, data->file, data->nr_bytes, 881 fc->async_read); 882 data->nr_bytes = 0; 883 data->ia = NULL; 884 ia = NULL; 885 } 886 if (!ia) { 887 if (fc->num_background >= fc->congestion_threshold && 888 rac->ra->async_size >= readahead_count(rac)) 889 /* 890 * Congested and only async pages left, so skip the 891 * rest. 892 */ 893 return -EAGAIN; 894 895 nr_pages = min(fc->max_pages, readahead_count(rac)); 896 data->ia = fuse_io_alloc(NULL, nr_pages); 897 if (!data->ia) 898 return -ENOMEM; 899 ia = data->ia; 900 } 901 folio_get(folio); 902 ap = &ia->ap; 903 ap->folios[ap->num_folios] = folio; 904 ap->descs[ap->num_folios].offset = off; 905 ap->descs[ap->num_folios].length = len; 906 data->nr_bytes += len; 907 ap->num_folios++; 908 909 return 0; 910 } 911 912 static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter, 913 struct iomap_read_folio_ctx *ctx, 914 size_t len) 915 { 916 struct fuse_fill_read_data *data = ctx->read_ctx; 917 struct folio *folio = ctx->cur_folio; 918 loff_t pos = iter->pos; 919 size_t off = offset_in_folio(folio, pos); 920 struct file *file = data->file; 921 int ret; 922 923 if (ctx->rac) { 924 ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len); 925 /* 926 * If fuse_handle_readahead was successful, fuse_readpages_end 927 * will do the iomap_finish_folio_read, else we need to call it 928 * here 929 */ 930 if (ret) 931 iomap_finish_folio_read(folio, off, len, ret); 932 } else { 933 /* 934 * for non-readahead read requests, do reads synchronously 935 * since it's not guaranteed that the server can handle 936 * out-of-order reads 937 */ 938 ret = fuse_do_readfolio(file, folio, off, len); 939 iomap_finish_folio_read(folio, off, len, ret); 940 } 941 return ret; 942 } 943 944 static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx) 945 { 946 struct fuse_fill_read_data *data = ctx->read_ctx; 947 948 if (data->ia) 949 fuse_send_readpages(data->ia, data->file, data->nr_bytes, 950 data->fc->async_read); 951 } 952 953 static const struct iomap_read_ops fuse_iomap_read_ops = { 954 .read_folio_range = fuse_iomap_read_folio_range_async, 955 .submit_read = fuse_iomap_read_submit, 956 }; 957 958 static int fuse_read_folio(struct file *file, struct folio *folio) 959 { 960 struct inode *inode = folio->mapping->host; 961 struct fuse_fill_read_data data = { 962 .file = file, 963 }; 964 struct iomap_read_folio_ctx ctx = { 965 .cur_folio = folio, 966 .ops = &fuse_iomap_read_ops, 967 .read_ctx = &data, 968 969 }; 970 971 if (fuse_is_bad(inode)) { 972 folio_unlock(folio); 973 return -EIO; 974 } 975 976 iomap_read_folio(&fuse_iomap_ops, &ctx); 977 fuse_invalidate_atime(inode); 978 return 0; 979 } 980 981 static int fuse_iomap_read_folio_range(const struct iomap_iter *iter, 982 struct folio *folio, loff_t pos, 983 size_t len) 984 { 985 struct file *file = iter->private; 986 size_t off = offset_in_folio(folio, pos); 987 988 return fuse_do_readfolio(file, folio, off, len); 989 } 990 991 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, 992 int err) 993 { 994 int i; 995 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); 996 struct fuse_args_pages *ap = &ia->ap; 997 size_t count = ia->read.in.size; 998 size_t num_read = args->out_args[0].size; 999 struct address_space *mapping; 1000 struct inode *inode; 1001 1002 WARN_ON_ONCE(!ap->num_folios); 1003 mapping = ap->folios[0]->mapping; 1004 inode = mapping->host; 1005 1006 /* 1007 * Short read means EOF. If file size is larger, truncate it 1008 */ 1009 if (!err && num_read < count) 1010 fuse_short_read(inode, ia->read.attr_ver, num_read, ap); 1011 1012 fuse_invalidate_atime(inode); 1013 1014 for (i = 0; i < ap->num_folios; i++) { 1015 iomap_finish_folio_read(ap->folios[i], ap->descs[i].offset, 1016 ap->descs[i].length, err); 1017 folio_put(ap->folios[i]); 1018 } 1019 if (ia->ff) 1020 fuse_file_put(ia->ff, false); 1021 1022 fuse_io_free(ia); 1023 } 1024 1025 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file, 1026 unsigned int count, bool async) 1027 { 1028 struct fuse_file *ff = file->private_data; 1029 struct fuse_mount *fm = ff->fm; 1030 struct fuse_args_pages *ap = &ia->ap; 1031 loff_t pos = folio_pos(ap->folios[0]); 1032 ssize_t res; 1033 int err; 1034 1035 ap->args.out_pages = true; 1036 ap->args.page_zeroing = true; 1037 ap->args.page_replace = true; 1038 1039 /* Don't overflow end offset */ 1040 if (pos + (count - 1) == LLONG_MAX) { 1041 count--; 1042 ap->descs[ap->num_folios - 1].length--; 1043 } 1044 WARN_ON((loff_t) (pos + count) < 0); 1045 1046 fuse_read_args_fill(ia, file, pos, count, FUSE_READ); 1047 ia->read.attr_ver = fuse_get_attr_version(fm->fc); 1048 if (async) { 1049 ia->ff = fuse_file_get(ff); 1050 ap->args.end = fuse_readpages_end; 1051 err = fuse_simple_background(fm, &ap->args, GFP_KERNEL); 1052 if (!err) 1053 return; 1054 } else { 1055 res = fuse_simple_request(fm, &ap->args); 1056 err = res < 0 ? res : 0; 1057 } 1058 fuse_readpages_end(fm, &ap->args, err); 1059 } 1060 1061 static void fuse_readahead(struct readahead_control *rac) 1062 { 1063 struct inode *inode = rac->mapping->host; 1064 struct fuse_conn *fc = get_fuse_conn(inode); 1065 struct fuse_fill_read_data data = { 1066 .file = rac->file, 1067 .fc = fc, 1068 }; 1069 struct iomap_read_folio_ctx ctx = { 1070 .ops = &fuse_iomap_read_ops, 1071 .rac = rac, 1072 .read_ctx = &data 1073 }; 1074 1075 if (fuse_is_bad(inode)) 1076 return; 1077 1078 iomap_readahead(&fuse_iomap_ops, &ctx); 1079 } 1080 1081 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to) 1082 { 1083 struct inode *inode = iocb->ki_filp->f_mapping->host; 1084 struct fuse_conn *fc = get_fuse_conn(inode); 1085 1086 /* 1087 * In auto invalidate mode, always update attributes on read. 1088 * Otherwise, only update if we attempt to read past EOF (to ensure 1089 * i_size is up to date). 1090 */ 1091 if (fc->auto_inval_data || 1092 (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) { 1093 int err; 1094 err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE); 1095 if (err) 1096 return err; 1097 } 1098 1099 return generic_file_read_iter(iocb, to); 1100 } 1101 1102 static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff, 1103 loff_t pos, size_t count) 1104 { 1105 struct fuse_args *args = &ia->ap.args; 1106 1107 ia->write.in.fh = ff->fh; 1108 ia->write.in.offset = pos; 1109 ia->write.in.size = count; 1110 args->opcode = FUSE_WRITE; 1111 args->nodeid = ff->nodeid; 1112 args->in_numargs = 2; 1113 if (ff->fm->fc->minor < 9) 1114 args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; 1115 else 1116 args->in_args[0].size = sizeof(ia->write.in); 1117 args->in_args[0].value = &ia->write.in; 1118 args->in_args[1].size = count; 1119 args->out_numargs = 1; 1120 args->out_args[0].size = sizeof(ia->write.out); 1121 args->out_args[0].value = &ia->write.out; 1122 } 1123 1124 static unsigned int fuse_write_flags(struct kiocb *iocb) 1125 { 1126 unsigned int flags = iocb->ki_filp->f_flags; 1127 1128 if (iocb_is_dsync(iocb)) 1129 flags |= O_DSYNC; 1130 if (iocb->ki_flags & IOCB_SYNC) 1131 flags |= O_SYNC; 1132 1133 return flags; 1134 } 1135 1136 static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos, 1137 size_t count, fl_owner_t owner) 1138 { 1139 struct kiocb *iocb = ia->io->iocb; 1140 struct file *file = iocb->ki_filp; 1141 struct fuse_file *ff = file->private_data; 1142 struct fuse_mount *fm = ff->fm; 1143 struct fuse_write_in *inarg = &ia->write.in; 1144 ssize_t err; 1145 1146 fuse_write_args_fill(ia, ff, pos, count); 1147 inarg->flags = fuse_write_flags(iocb); 1148 if (owner != NULL) { 1149 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 1150 inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner); 1151 } 1152 1153 if (ia->io->async) 1154 return fuse_async_req_send(fm, ia, count); 1155 1156 err = fuse_simple_request(fm, &ia->ap.args); 1157 if (!err && ia->write.out.size > count) 1158 err = -EIO; 1159 1160 return err ?: ia->write.out.size; 1161 } 1162 1163 bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written) 1164 { 1165 struct fuse_conn *fc = get_fuse_conn(inode); 1166 struct fuse_inode *fi = get_fuse_inode(inode); 1167 bool ret = false; 1168 1169 spin_lock(&fi->lock); 1170 fi->attr_version = atomic64_inc_return(&fc->attr_version); 1171 if (written > 0 && pos > inode->i_size) { 1172 i_size_write(inode, pos); 1173 ret = true; 1174 } 1175 spin_unlock(&fi->lock); 1176 1177 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 1178 1179 return ret; 1180 } 1181 1182 static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, 1183 struct kiocb *iocb, struct inode *inode, 1184 loff_t pos, size_t count) 1185 { 1186 struct fuse_args_pages *ap = &ia->ap; 1187 struct file *file = iocb->ki_filp; 1188 struct fuse_file *ff = file->private_data; 1189 struct fuse_mount *fm = ff->fm; 1190 unsigned int offset, i; 1191 bool short_write; 1192 int err; 1193 1194 for (i = 0; i < ap->num_folios; i++) 1195 folio_wait_writeback(ap->folios[i]); 1196 1197 fuse_write_args_fill(ia, ff, pos, count); 1198 ia->write.in.flags = fuse_write_flags(iocb); 1199 if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID)) 1200 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; 1201 1202 err = fuse_simple_request(fm, &ap->args); 1203 if (!err && ia->write.out.size > count) 1204 err = -EIO; 1205 1206 short_write = ia->write.out.size < count; 1207 offset = ap->descs[0].offset; 1208 count = ia->write.out.size; 1209 for (i = 0; i < ap->num_folios; i++) { 1210 struct folio *folio = ap->folios[i]; 1211 1212 if (err) { 1213 folio_clear_uptodate(folio); 1214 } else { 1215 if (count >= folio_size(folio) - offset) 1216 count -= folio_size(folio) - offset; 1217 else { 1218 if (short_write) 1219 folio_clear_uptodate(folio); 1220 count = 0; 1221 } 1222 offset = 0; 1223 } 1224 if (ia->write.folio_locked && (i == ap->num_folios - 1)) 1225 folio_unlock(folio); 1226 folio_put(folio); 1227 } 1228 1229 return err; 1230 } 1231 1232 static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, 1233 struct address_space *mapping, 1234 struct iov_iter *ii, loff_t pos, 1235 unsigned int max_folios) 1236 { 1237 struct fuse_args_pages *ap = &ia->ap; 1238 struct fuse_conn *fc = get_fuse_conn(mapping->host); 1239 unsigned offset = pos & (PAGE_SIZE - 1); 1240 size_t count = 0; 1241 unsigned int num; 1242 int err = 0; 1243 1244 num = min(iov_iter_count(ii), fc->max_write); 1245 1246 ap->args.in_pages = true; 1247 1248 while (num && ap->num_folios < max_folios) { 1249 size_t tmp; 1250 struct folio *folio; 1251 pgoff_t index = pos >> PAGE_SHIFT; 1252 unsigned int bytes; 1253 unsigned int folio_offset; 1254 1255 again: 1256 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 1257 mapping_gfp_mask(mapping)); 1258 if (IS_ERR(folio)) { 1259 err = PTR_ERR(folio); 1260 break; 1261 } 1262 1263 if (mapping_writably_mapped(mapping)) 1264 flush_dcache_folio(folio); 1265 1266 folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset; 1267 bytes = min(folio_size(folio) - folio_offset, num); 1268 1269 tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii); 1270 flush_dcache_folio(folio); 1271 1272 if (!tmp) { 1273 folio_unlock(folio); 1274 folio_put(folio); 1275 1276 /* 1277 * Ensure forward progress by faulting in 1278 * while not holding the folio lock: 1279 */ 1280 if (fault_in_iov_iter_readable(ii, bytes)) { 1281 err = -EFAULT; 1282 break; 1283 } 1284 1285 goto again; 1286 } 1287 1288 ap->folios[ap->num_folios] = folio; 1289 ap->descs[ap->num_folios].offset = folio_offset; 1290 ap->descs[ap->num_folios].length = tmp; 1291 ap->num_folios++; 1292 1293 count += tmp; 1294 pos += tmp; 1295 num -= tmp; 1296 offset += tmp; 1297 if (offset == folio_size(folio)) 1298 offset = 0; 1299 1300 /* If we copied full folio, mark it uptodate */ 1301 if (tmp == folio_size(folio)) 1302 folio_mark_uptodate(folio); 1303 1304 if (folio_test_uptodate(folio)) { 1305 folio_unlock(folio); 1306 } else { 1307 ia->write.folio_locked = true; 1308 break; 1309 } 1310 if (!fc->big_writes || offset != 0) 1311 break; 1312 } 1313 1314 return count > 0 ? count : err; 1315 } 1316 1317 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len, 1318 unsigned int max_pages) 1319 { 1320 return min_t(unsigned int, 1321 ((pos + len - 1) >> PAGE_SHIFT) - 1322 (pos >> PAGE_SHIFT) + 1, 1323 max_pages); 1324 } 1325 1326 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) 1327 { 1328 struct address_space *mapping = iocb->ki_filp->f_mapping; 1329 struct inode *inode = mapping->host; 1330 struct fuse_conn *fc = get_fuse_conn(inode); 1331 struct fuse_inode *fi = get_fuse_inode(inode); 1332 loff_t pos = iocb->ki_pos; 1333 int err = 0; 1334 ssize_t res = 0; 1335 1336 if (inode->i_size < pos + iov_iter_count(ii)) 1337 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 1338 1339 do { 1340 ssize_t count; 1341 struct fuse_io_args ia = {}; 1342 struct fuse_args_pages *ap = &ia.ap; 1343 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), 1344 fc->max_pages); 1345 1346 ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs); 1347 if (!ap->folios) { 1348 err = -ENOMEM; 1349 break; 1350 } 1351 1352 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages); 1353 if (count <= 0) { 1354 err = count; 1355 } else { 1356 err = fuse_send_write_pages(&ia, iocb, inode, 1357 pos, count); 1358 if (!err) { 1359 size_t num_written = ia.write.out.size; 1360 1361 res += num_written; 1362 pos += num_written; 1363 1364 /* break out of the loop on short write */ 1365 if (num_written != count) 1366 err = -EIO; 1367 } 1368 } 1369 kfree(ap->folios); 1370 } while (!err && iov_iter_count(ii)); 1371 1372 fuse_write_update_attr(inode, pos, res); 1373 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 1374 1375 if (!res) 1376 return err; 1377 iocb->ki_pos += res; 1378 return res; 1379 } 1380 1381 static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) 1382 { 1383 struct inode *inode = file_inode(iocb->ki_filp); 1384 1385 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); 1386 } 1387 1388 /* 1389 * @return true if an exclusive lock for direct IO writes is needed 1390 */ 1391 static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) 1392 { 1393 struct file *file = iocb->ki_filp; 1394 struct fuse_file *ff = file->private_data; 1395 struct inode *inode = file_inode(iocb->ki_filp); 1396 struct fuse_inode *fi = get_fuse_inode(inode); 1397 1398 /* Server side has to advise that it supports parallel dio writes. */ 1399 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) 1400 return true; 1401 1402 /* 1403 * Append will need to know the eventual EOF - always needs an 1404 * exclusive lock. 1405 */ 1406 if (iocb->ki_flags & IOCB_APPEND) 1407 return true; 1408 1409 /* shared locks are not allowed with parallel page cache IO */ 1410 if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) 1411 return true; 1412 1413 /* Parallel dio beyond EOF is not supported, at least for now. */ 1414 if (fuse_io_past_eof(iocb, from)) 1415 return true; 1416 1417 return false; 1418 } 1419 1420 static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, 1421 bool *exclusive) 1422 { 1423 struct inode *inode = file_inode(iocb->ki_filp); 1424 struct fuse_inode *fi = get_fuse_inode(inode); 1425 1426 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); 1427 if (*exclusive) { 1428 inode_lock(inode); 1429 } else { 1430 inode_lock_shared(inode); 1431 /* 1432 * New parallal dio allowed only if inode is not in caching 1433 * mode and denies new opens in caching mode. This check 1434 * should be performed only after taking shared inode lock. 1435 * Previous past eof check was without inode lock and might 1436 * have raced, so check it again. 1437 */ 1438 if (fuse_io_past_eof(iocb, from) || 1439 fuse_inode_uncached_io_start(fi, NULL) != 0) { 1440 inode_unlock_shared(inode); 1441 inode_lock(inode); 1442 *exclusive = true; 1443 } 1444 } 1445 } 1446 1447 static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) 1448 { 1449 struct inode *inode = file_inode(iocb->ki_filp); 1450 struct fuse_inode *fi = get_fuse_inode(inode); 1451 1452 if (exclusive) { 1453 inode_unlock(inode); 1454 } else { 1455 /* Allow opens in caching mode after last parallel dio end */ 1456 fuse_inode_uncached_io_end(fi); 1457 inode_unlock_shared(inode); 1458 } 1459 } 1460 1461 static const struct iomap_write_ops fuse_iomap_write_ops = { 1462 .read_folio_range = fuse_iomap_read_folio_range, 1463 }; 1464 1465 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) 1466 { 1467 struct file *file = iocb->ki_filp; 1468 struct mnt_idmap *idmap = file_mnt_idmap(file); 1469 struct address_space *mapping = file->f_mapping; 1470 ssize_t written = 0; 1471 struct inode *inode = mapping->host; 1472 ssize_t err, count; 1473 struct fuse_conn *fc = get_fuse_conn(inode); 1474 bool writeback = false; 1475 1476 if (fc->writeback_cache) { 1477 /* Update size (EOF optimization) and mode (SUID clearing) */ 1478 err = fuse_update_attributes(mapping->host, file, 1479 STATX_SIZE | STATX_MODE); 1480 if (err) 1481 return err; 1482 1483 if (!fc->handle_killpriv_v2 || 1484 !setattr_should_drop_suidgid(idmap, file_inode(file))) 1485 writeback = true; 1486 } 1487 1488 inode_lock(inode); 1489 1490 err = count = generic_write_checks(iocb, from); 1491 if (err <= 0) 1492 goto out; 1493 1494 task_io_account_write(count); 1495 1496 err = kiocb_modified(iocb); 1497 if (err) 1498 goto out; 1499 1500 if (iocb->ki_flags & IOCB_DIRECT) { 1501 written = generic_file_direct_write(iocb, from); 1502 if (written < 0 || !iov_iter_count(from)) 1503 goto out; 1504 written = direct_write_fallback(iocb, from, written, 1505 fuse_perform_write(iocb, from)); 1506 } else if (writeback) { 1507 /* 1508 * Use iomap so that we can do granular uptodate reads 1509 * and granular dirty tracking for large folios. 1510 */ 1511 written = iomap_file_buffered_write(iocb, from, 1512 &fuse_iomap_ops, 1513 &fuse_iomap_write_ops, 1514 file); 1515 } else { 1516 written = fuse_perform_write(iocb, from); 1517 } 1518 out: 1519 inode_unlock(inode); 1520 if (written > 0) 1521 written = generic_write_sync(iocb, written); 1522 1523 return written ? written : err; 1524 } 1525 1526 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) 1527 { 1528 return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset; 1529 } 1530 1531 static inline size_t fuse_get_frag_size(const struct iov_iter *ii, 1532 size_t max_size) 1533 { 1534 return min(iov_iter_single_seg_count(ii), max_size); 1535 } 1536 1537 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, 1538 size_t *nbytesp, int write, 1539 unsigned int max_pages, 1540 bool use_pages_for_kvec_io) 1541 { 1542 bool flush_or_invalidate = false; 1543 unsigned int nr_pages = 0; 1544 size_t nbytes = 0; /* # bytes already packed in req */ 1545 ssize_t ret = 0; 1546 1547 /* Special case for kernel I/O: can copy directly into the buffer. 1548 * However if the implementation of fuse_conn requires pages instead of 1549 * pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead. 1550 */ 1551 if (iov_iter_is_kvec(ii)) { 1552 void *user_addr = (void *)fuse_get_user_addr(ii); 1553 1554 if (!use_pages_for_kvec_io) { 1555 size_t frag_size = fuse_get_frag_size(ii, *nbytesp); 1556 1557 if (write) 1558 ap->args.in_args[1].value = user_addr; 1559 else 1560 ap->args.out_args[0].value = user_addr; 1561 1562 iov_iter_advance(ii, frag_size); 1563 *nbytesp = frag_size; 1564 return 0; 1565 } 1566 1567 if (is_vmalloc_addr(user_addr)) { 1568 ap->args.vmap_base = user_addr; 1569 flush_or_invalidate = true; 1570 } 1571 } 1572 1573 /* 1574 * Until there is support for iov_iter_extract_folios(), we have to 1575 * manually extract pages using iov_iter_extract_pages() and then 1576 * copy that to a folios array. 1577 */ 1578 struct page **pages = kzalloc(max_pages * sizeof(struct page *), 1579 GFP_KERNEL); 1580 if (!pages) { 1581 ret = -ENOMEM; 1582 goto out; 1583 } 1584 1585 while (nbytes < *nbytesp && nr_pages < max_pages) { 1586 unsigned nfolios, i; 1587 size_t start; 1588 1589 ret = iov_iter_extract_pages(ii, &pages, 1590 *nbytesp - nbytes, 1591 max_pages - nr_pages, 1592 0, &start); 1593 if (ret < 0) 1594 break; 1595 1596 nbytes += ret; 1597 1598 nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE); 1599 1600 for (i = 0; i < nfolios; i++) { 1601 struct folio *folio = page_folio(pages[i]); 1602 unsigned int offset = start + 1603 (folio_page_idx(folio, pages[i]) << PAGE_SHIFT); 1604 unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start); 1605 1606 ap->descs[ap->num_folios].offset = offset; 1607 ap->descs[ap->num_folios].length = len; 1608 ap->folios[ap->num_folios] = folio; 1609 start = 0; 1610 ret -= len; 1611 ap->num_folios++; 1612 } 1613 1614 nr_pages += nfolios; 1615 } 1616 kfree(pages); 1617 1618 if (write && flush_or_invalidate) 1619 flush_kernel_vmap_range(ap->args.vmap_base, nbytes); 1620 1621 ap->args.invalidate_vmap = !write && flush_or_invalidate; 1622 ap->args.is_pinned = iov_iter_extract_will_pin(ii); 1623 ap->args.user_pages = true; 1624 if (write) 1625 ap->args.in_pages = true; 1626 else 1627 ap->args.out_pages = true; 1628 1629 out: 1630 *nbytesp = nbytes; 1631 1632 return ret < 0 ? ret : 0; 1633 } 1634 1635 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, 1636 loff_t *ppos, int flags) 1637 { 1638 int write = flags & FUSE_DIO_WRITE; 1639 int cuse = flags & FUSE_DIO_CUSE; 1640 struct file *file = io->iocb->ki_filp; 1641 struct address_space *mapping = file->f_mapping; 1642 struct inode *inode = mapping->host; 1643 struct fuse_file *ff = file->private_data; 1644 struct fuse_conn *fc = ff->fm->fc; 1645 size_t nmax = write ? fc->max_write : fc->max_read; 1646 loff_t pos = *ppos; 1647 size_t count = iov_iter_count(iter); 1648 pgoff_t idx_from = pos >> PAGE_SHIFT; 1649 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT; 1650 ssize_t res = 0; 1651 int err = 0; 1652 struct fuse_io_args *ia; 1653 unsigned int max_pages; 1654 bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO; 1655 1656 max_pages = iov_iter_npages(iter, fc->max_pages); 1657 ia = fuse_io_alloc(io, max_pages); 1658 if (!ia) 1659 return -ENOMEM; 1660 1661 if (fopen_direct_io && fc->direct_io_allow_mmap) { 1662 res = filemap_write_and_wait_range(mapping, pos, pos + count - 1); 1663 if (res) { 1664 fuse_io_free(ia); 1665 return res; 1666 } 1667 } 1668 if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) { 1669 if (!write) 1670 inode_lock(inode); 1671 fuse_sync_writes(inode); 1672 if (!write) 1673 inode_unlock(inode); 1674 } 1675 1676 if (fopen_direct_io && write) { 1677 res = invalidate_inode_pages2_range(mapping, idx_from, idx_to); 1678 if (res) { 1679 fuse_io_free(ia); 1680 return res; 1681 } 1682 } 1683 1684 io->should_dirty = !write && user_backed_iter(iter); 1685 while (count) { 1686 ssize_t nres; 1687 fl_owner_t owner = current->files; 1688 size_t nbytes = min(count, nmax); 1689 1690 err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write, 1691 max_pages, fc->use_pages_for_kvec_io); 1692 if (err && !nbytes) 1693 break; 1694 1695 if (write) { 1696 if (!capable(CAP_FSETID)) 1697 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; 1698 1699 nres = fuse_send_write(ia, pos, nbytes, owner); 1700 } else { 1701 nres = fuse_send_read(ia, pos, nbytes, owner); 1702 } 1703 1704 if (!io->async || nres < 0) { 1705 fuse_release_user_pages(&ia->ap, nres, io->should_dirty); 1706 fuse_io_free(ia); 1707 } 1708 ia = NULL; 1709 if (nres < 0) { 1710 iov_iter_revert(iter, nbytes); 1711 err = nres; 1712 break; 1713 } 1714 WARN_ON(nres > nbytes); 1715 1716 count -= nres; 1717 res += nres; 1718 pos += nres; 1719 if (nres != nbytes) { 1720 iov_iter_revert(iter, nbytes - nres); 1721 break; 1722 } 1723 if (count) { 1724 max_pages = iov_iter_npages(iter, fc->max_pages); 1725 ia = fuse_io_alloc(io, max_pages); 1726 if (!ia) 1727 break; 1728 } 1729 } 1730 if (ia) 1731 fuse_io_free(ia); 1732 if (res > 0) 1733 *ppos = pos; 1734 1735 return res > 0 ? res : err; 1736 } 1737 EXPORT_SYMBOL_GPL(fuse_direct_io); 1738 1739 static ssize_t __fuse_direct_read(struct fuse_io_priv *io, 1740 struct iov_iter *iter, 1741 loff_t *ppos) 1742 { 1743 ssize_t res; 1744 struct inode *inode = file_inode(io->iocb->ki_filp); 1745 1746 res = fuse_direct_io(io, iter, ppos, 0); 1747 1748 fuse_invalidate_atime(inode); 1749 1750 return res; 1751 } 1752 1753 static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter); 1754 1755 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) 1756 { 1757 ssize_t res; 1758 1759 if (!is_sync_kiocb(iocb)) { 1760 res = fuse_direct_IO(iocb, to); 1761 } else { 1762 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); 1763 1764 res = __fuse_direct_read(&io, to, &iocb->ki_pos); 1765 } 1766 1767 return res; 1768 } 1769 1770 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) 1771 { 1772 struct inode *inode = file_inode(iocb->ki_filp); 1773 ssize_t res; 1774 bool exclusive; 1775 1776 fuse_dio_lock(iocb, from, &exclusive); 1777 res = generic_write_checks(iocb, from); 1778 if (res > 0) { 1779 task_io_account_write(res); 1780 if (!is_sync_kiocb(iocb)) { 1781 res = fuse_direct_IO(iocb, from); 1782 } else { 1783 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); 1784 1785 res = fuse_direct_io(&io, from, &iocb->ki_pos, 1786 FUSE_DIO_WRITE); 1787 fuse_write_update_attr(inode, iocb->ki_pos, res); 1788 } 1789 } 1790 fuse_dio_unlock(iocb, exclusive); 1791 1792 return res; 1793 } 1794 1795 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 1796 { 1797 struct file *file = iocb->ki_filp; 1798 struct fuse_file *ff = file->private_data; 1799 struct inode *inode = file_inode(file); 1800 1801 if (fuse_is_bad(inode)) 1802 return -EIO; 1803 1804 if (FUSE_IS_DAX(inode)) 1805 return fuse_dax_read_iter(iocb, to); 1806 1807 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1808 if (ff->open_flags & FOPEN_DIRECT_IO) 1809 return fuse_direct_read_iter(iocb, to); 1810 else if (fuse_file_passthrough(ff)) 1811 return fuse_passthrough_read_iter(iocb, to); 1812 else 1813 return fuse_cache_read_iter(iocb, to); 1814 } 1815 1816 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 1817 { 1818 struct file *file = iocb->ki_filp; 1819 struct fuse_file *ff = file->private_data; 1820 struct inode *inode = file_inode(file); 1821 1822 if (fuse_is_bad(inode)) 1823 return -EIO; 1824 1825 if (FUSE_IS_DAX(inode)) 1826 return fuse_dax_write_iter(iocb, from); 1827 1828 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1829 if (ff->open_flags & FOPEN_DIRECT_IO) 1830 return fuse_direct_write_iter(iocb, from); 1831 else if (fuse_file_passthrough(ff)) 1832 return fuse_passthrough_write_iter(iocb, from); 1833 else 1834 return fuse_cache_write_iter(iocb, from); 1835 } 1836 1837 static ssize_t fuse_splice_read(struct file *in, loff_t *ppos, 1838 struct pipe_inode_info *pipe, size_t len, 1839 unsigned int flags) 1840 { 1841 struct fuse_file *ff = in->private_data; 1842 1843 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1844 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO)) 1845 return fuse_passthrough_splice_read(in, ppos, pipe, len, flags); 1846 else 1847 return filemap_splice_read(in, ppos, pipe, len, flags); 1848 } 1849 1850 static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out, 1851 loff_t *ppos, size_t len, unsigned int flags) 1852 { 1853 struct fuse_file *ff = out->private_data; 1854 1855 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1856 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO)) 1857 return fuse_passthrough_splice_write(pipe, out, ppos, len, flags); 1858 else 1859 return iter_file_splice_write(pipe, out, ppos, len, flags); 1860 } 1861 1862 static void fuse_writepage_free(struct fuse_writepage_args *wpa) 1863 { 1864 struct fuse_args_pages *ap = &wpa->ia.ap; 1865 1866 if (wpa->bucket) 1867 fuse_sync_bucket_dec(wpa->bucket); 1868 1869 fuse_file_put(wpa->ia.ff, false); 1870 1871 kfree(ap->folios); 1872 kfree(wpa); 1873 } 1874 1875 static void fuse_writepage_finish(struct fuse_writepage_args *wpa) 1876 { 1877 struct fuse_args_pages *ap = &wpa->ia.ap; 1878 struct inode *inode = wpa->inode; 1879 struct fuse_inode *fi = get_fuse_inode(inode); 1880 int i; 1881 1882 for (i = 0; i < ap->num_folios; i++) 1883 /* 1884 * Benchmarks showed that ending writeback within the 1885 * scope of the fi->lock alleviates xarray lock 1886 * contention and noticeably improves performance. 1887 */ 1888 iomap_finish_folio_write(inode, ap->folios[i], 1); 1889 1890 wake_up(&fi->page_waitq); 1891 } 1892 1893 /* Called under fi->lock, may release and reacquire it */ 1894 static void fuse_send_writepage(struct fuse_mount *fm, 1895 struct fuse_writepage_args *wpa, loff_t size) 1896 __releases(fi->lock) 1897 __acquires(fi->lock) 1898 { 1899 struct fuse_inode *fi = get_fuse_inode(wpa->inode); 1900 struct fuse_args_pages *ap = &wpa->ia.ap; 1901 struct fuse_write_in *inarg = &wpa->ia.write.in; 1902 struct fuse_args *args = &ap->args; 1903 __u64 data_size = 0; 1904 int err, i; 1905 1906 for (i = 0; i < ap->num_folios; i++) 1907 data_size += ap->descs[i].length; 1908 1909 fi->writectr++; 1910 if (inarg->offset + data_size <= size) { 1911 inarg->size = data_size; 1912 } else if (inarg->offset < size) { 1913 inarg->size = size - inarg->offset; 1914 } else { 1915 /* Got truncated off completely */ 1916 goto out_free; 1917 } 1918 1919 args->in_args[1].size = inarg->size; 1920 args->force = true; 1921 args->nocreds = true; 1922 1923 err = fuse_simple_background(fm, args, GFP_ATOMIC); 1924 if (err == -ENOMEM) { 1925 spin_unlock(&fi->lock); 1926 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL); 1927 spin_lock(&fi->lock); 1928 } 1929 1930 /* Fails on broken connection only */ 1931 if (unlikely(err)) 1932 goto out_free; 1933 1934 return; 1935 1936 out_free: 1937 fi->writectr--; 1938 fuse_writepage_finish(wpa); 1939 spin_unlock(&fi->lock); 1940 fuse_writepage_free(wpa); 1941 spin_lock(&fi->lock); 1942 } 1943 1944 /* 1945 * If fi->writectr is positive (no truncate or fsync going on) send 1946 * all queued writepage requests. 1947 * 1948 * Called with fi->lock 1949 */ 1950 void fuse_flush_writepages(struct inode *inode) 1951 __releases(fi->lock) 1952 __acquires(fi->lock) 1953 { 1954 struct fuse_mount *fm = get_fuse_mount(inode); 1955 struct fuse_inode *fi = get_fuse_inode(inode); 1956 loff_t crop = i_size_read(inode); 1957 struct fuse_writepage_args *wpa; 1958 1959 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { 1960 wpa = list_entry(fi->queued_writes.next, 1961 struct fuse_writepage_args, queue_entry); 1962 list_del_init(&wpa->queue_entry); 1963 fuse_send_writepage(fm, wpa, crop); 1964 } 1965 } 1966 1967 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, 1968 int error) 1969 { 1970 struct fuse_writepage_args *wpa = 1971 container_of(args, typeof(*wpa), ia.ap.args); 1972 struct inode *inode = wpa->inode; 1973 struct fuse_inode *fi = get_fuse_inode(inode); 1974 struct fuse_conn *fc = get_fuse_conn(inode); 1975 1976 mapping_set_error(inode->i_mapping, error); 1977 /* 1978 * A writeback finished and this might have updated mtime/ctime on 1979 * server making local mtime/ctime stale. Hence invalidate attrs. 1980 * Do this only if writeback_cache is not enabled. If writeback_cache 1981 * is enabled, we trust local ctime/mtime. 1982 */ 1983 if (!fc->writeback_cache) 1984 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY); 1985 spin_lock(&fi->lock); 1986 fi->writectr--; 1987 fuse_writepage_finish(wpa); 1988 spin_unlock(&fi->lock); 1989 fuse_writepage_free(wpa); 1990 } 1991 1992 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi) 1993 { 1994 struct fuse_file *ff; 1995 1996 spin_lock(&fi->lock); 1997 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file, 1998 write_entry); 1999 if (ff) 2000 fuse_file_get(ff); 2001 spin_unlock(&fi->lock); 2002 2003 return ff; 2004 } 2005 2006 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi) 2007 { 2008 struct fuse_file *ff = __fuse_write_file_get(fi); 2009 WARN_ON(!ff); 2010 return ff; 2011 } 2012 2013 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) 2014 { 2015 struct fuse_inode *fi = get_fuse_inode(inode); 2016 struct fuse_file *ff; 2017 int err; 2018 2019 ff = __fuse_write_file_get(fi); 2020 err = fuse_flush_times(inode, ff); 2021 if (ff) 2022 fuse_file_put(ff, false); 2023 2024 return err; 2025 } 2026 2027 static struct fuse_writepage_args *fuse_writepage_args_alloc(void) 2028 { 2029 struct fuse_writepage_args *wpa; 2030 struct fuse_args_pages *ap; 2031 2032 wpa = kzalloc(sizeof(*wpa), GFP_NOFS); 2033 if (wpa) { 2034 ap = &wpa->ia.ap; 2035 ap->num_folios = 0; 2036 ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs); 2037 if (!ap->folios) { 2038 kfree(wpa); 2039 wpa = NULL; 2040 } 2041 } 2042 return wpa; 2043 2044 } 2045 2046 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, 2047 struct fuse_writepage_args *wpa) 2048 { 2049 if (!fc->sync_fs) 2050 return; 2051 2052 rcu_read_lock(); 2053 /* Prevent resurrection of dead bucket in unlikely race with syncfs */ 2054 do { 2055 wpa->bucket = rcu_dereference(fc->curr_bucket); 2056 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count))); 2057 rcu_read_unlock(); 2058 } 2059 2060 static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, 2061 uint32_t folio_index, loff_t offset, unsigned len) 2062 { 2063 struct fuse_args_pages *ap = &wpa->ia.ap; 2064 2065 ap->folios[folio_index] = folio; 2066 ap->descs[folio_index].offset = offset; 2067 ap->descs[folio_index].length = len; 2068 } 2069 2070 static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, 2071 size_t offset, 2072 struct fuse_file *ff) 2073 { 2074 struct inode *inode = folio->mapping->host; 2075 struct fuse_conn *fc = get_fuse_conn(inode); 2076 struct fuse_writepage_args *wpa; 2077 struct fuse_args_pages *ap; 2078 2079 wpa = fuse_writepage_args_alloc(); 2080 if (!wpa) 2081 return NULL; 2082 2083 fuse_writepage_add_to_bucket(fc, wpa); 2084 fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0); 2085 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; 2086 wpa->inode = inode; 2087 wpa->ia.ff = ff; 2088 2089 ap = &wpa->ia.ap; 2090 ap->args.in_pages = true; 2091 ap->args.end = fuse_writepage_end; 2092 2093 return wpa; 2094 } 2095 2096 struct fuse_fill_wb_data { 2097 struct fuse_writepage_args *wpa; 2098 struct fuse_file *ff; 2099 unsigned int max_folios; 2100 /* 2101 * nr_bytes won't overflow since fuse_folios_need_send() caps 2102 * wb requests to never exceed fc->max_pages (which has an upper bound 2103 * of U16_MAX). 2104 */ 2105 unsigned int nr_bytes; 2106 }; 2107 2108 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data, 2109 unsigned int max_pages) 2110 { 2111 struct fuse_args_pages *ap = &data->wpa->ia.ap; 2112 struct folio **folios; 2113 struct fuse_folio_desc *descs; 2114 unsigned int nfolios = min_t(unsigned int, 2115 max_t(unsigned int, data->max_folios * 2, 2116 FUSE_DEFAULT_MAX_PAGES_PER_REQ), 2117 max_pages); 2118 WARN_ON(nfolios <= data->max_folios); 2119 2120 folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs); 2121 if (!folios) 2122 return false; 2123 2124 memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios); 2125 memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios); 2126 kfree(ap->folios); 2127 ap->folios = folios; 2128 ap->descs = descs; 2129 data->max_folios = nfolios; 2130 2131 return true; 2132 } 2133 2134 static void fuse_writepages_send(struct inode *inode, 2135 struct fuse_fill_wb_data *data) 2136 { 2137 struct fuse_writepage_args *wpa = data->wpa; 2138 struct fuse_inode *fi = get_fuse_inode(inode); 2139 2140 spin_lock(&fi->lock); 2141 list_add_tail(&wpa->queue_entry, &fi->queued_writes); 2142 fuse_flush_writepages(inode); 2143 spin_unlock(&fi->lock); 2144 } 2145 2146 static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos, 2147 unsigned len, struct fuse_args_pages *ap, 2148 unsigned cur_bytes, bool write) 2149 { 2150 struct folio *prev_folio; 2151 struct fuse_folio_desc prev_desc; 2152 unsigned bytes = cur_bytes + len; 2153 loff_t prev_pos; 2154 size_t max_bytes = write ? fc->max_write : fc->max_read; 2155 2156 WARN_ON(!ap->num_folios); 2157 2158 /* Reached max pages */ 2159 if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages) 2160 return true; 2161 2162 if (bytes > max_bytes) 2163 return true; 2164 2165 /* Discontinuity */ 2166 prev_folio = ap->folios[ap->num_folios - 1]; 2167 prev_desc = ap->descs[ap->num_folios - 1]; 2168 prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length; 2169 if (prev_pos != pos) 2170 return true; 2171 2172 return false; 2173 } 2174 2175 static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc, 2176 struct folio *folio, u64 pos, 2177 unsigned len, u64 end_pos) 2178 { 2179 struct fuse_fill_wb_data *data = wpc->wb_ctx; 2180 struct fuse_writepage_args *wpa = data->wpa; 2181 struct fuse_args_pages *ap = &wpa->ia.ap; 2182 struct inode *inode = wpc->inode; 2183 struct fuse_inode *fi = get_fuse_inode(inode); 2184 struct fuse_conn *fc = get_fuse_conn(inode); 2185 loff_t offset = offset_in_folio(folio, pos); 2186 2187 WARN_ON_ONCE(!data); 2188 2189 if (!data->ff) { 2190 data->ff = fuse_write_file_get(fi); 2191 if (!data->ff) 2192 return -EIO; 2193 } 2194 2195 if (wpa) { 2196 bool send = fuse_folios_need_send(fc, pos, len, ap, 2197 data->nr_bytes, true); 2198 2199 if (!send) { 2200 /* 2201 * Need to grow the pages array? If so, did the 2202 * expansion fail? 2203 */ 2204 send = (ap->num_folios == data->max_folios) && 2205 !fuse_pages_realloc(data, fc->max_pages); 2206 } 2207 2208 if (send) { 2209 fuse_writepages_send(inode, data); 2210 data->wpa = NULL; 2211 data->nr_bytes = 0; 2212 } 2213 } 2214 2215 if (data->wpa == NULL) { 2216 wpa = fuse_writepage_args_setup(folio, offset, data->ff); 2217 if (!wpa) 2218 return -ENOMEM; 2219 fuse_file_get(wpa->ia.ff); 2220 data->max_folios = 1; 2221 ap = &wpa->ia.ap; 2222 } 2223 2224 iomap_start_folio_write(inode, folio, 1); 2225 fuse_writepage_args_page_fill(wpa, folio, ap->num_folios, 2226 offset, len); 2227 data->nr_bytes += len; 2228 2229 ap->num_folios++; 2230 if (!data->wpa) 2231 data->wpa = wpa; 2232 2233 return len; 2234 } 2235 2236 static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc, 2237 int error) 2238 { 2239 struct fuse_fill_wb_data *data = wpc->wb_ctx; 2240 2241 WARN_ON_ONCE(!data); 2242 2243 if (data->wpa) { 2244 WARN_ON(!data->wpa->ia.ap.num_folios); 2245 fuse_writepages_send(wpc->inode, data); 2246 } 2247 2248 if (data->ff) 2249 fuse_file_put(data->ff, false); 2250 2251 return error; 2252 } 2253 2254 static const struct iomap_writeback_ops fuse_writeback_ops = { 2255 .writeback_range = fuse_iomap_writeback_range, 2256 .writeback_submit = fuse_iomap_writeback_submit, 2257 }; 2258 2259 static int fuse_writepages(struct address_space *mapping, 2260 struct writeback_control *wbc) 2261 { 2262 struct inode *inode = mapping->host; 2263 struct fuse_conn *fc = get_fuse_conn(inode); 2264 struct fuse_fill_wb_data data = {}; 2265 struct iomap_writepage_ctx wpc = { 2266 .inode = inode, 2267 .iomap.type = IOMAP_MAPPED, 2268 .wbc = wbc, 2269 .ops = &fuse_writeback_ops, 2270 .wb_ctx = &data, 2271 }; 2272 2273 if (fuse_is_bad(inode)) 2274 return -EIO; 2275 2276 if (wbc->sync_mode == WB_SYNC_NONE && 2277 fc->num_background >= fc->congestion_threshold) 2278 return 0; 2279 2280 return iomap_writepages(&wpc); 2281 } 2282 2283 static int fuse_launder_folio(struct folio *folio) 2284 { 2285 int err = 0; 2286 struct fuse_fill_wb_data data = {}; 2287 struct iomap_writepage_ctx wpc = { 2288 .inode = folio->mapping->host, 2289 .iomap.type = IOMAP_MAPPED, 2290 .ops = &fuse_writeback_ops, 2291 .wb_ctx = &data, 2292 }; 2293 2294 if (folio_clear_dirty_for_io(folio)) { 2295 err = iomap_writeback_folio(&wpc, folio); 2296 err = fuse_iomap_writeback_submit(&wpc, err); 2297 if (!err) 2298 folio_wait_writeback(folio); 2299 } 2300 return err; 2301 } 2302 2303 /* 2304 * Write back dirty data/metadata now (there may not be any suitable 2305 * open files later for data) 2306 */ 2307 static void fuse_vma_close(struct vm_area_struct *vma) 2308 { 2309 int err; 2310 2311 err = write_inode_now(vma->vm_file->f_mapping->host, 1); 2312 mapping_set_error(vma->vm_file->f_mapping, err); 2313 } 2314 2315 /* 2316 * Wait for writeback against this page to complete before allowing it 2317 * to be marked dirty again, and hence written back again, possibly 2318 * before the previous writepage completed. 2319 * 2320 * Block here, instead of in ->writepage(), so that the userspace fs 2321 * can only block processes actually operating on the filesystem. 2322 * 2323 * Otherwise unprivileged userspace fs would be able to block 2324 * unrelated: 2325 * 2326 * - page migration 2327 * - sync(2) 2328 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER 2329 */ 2330 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf) 2331 { 2332 struct folio *folio = page_folio(vmf->page); 2333 struct inode *inode = file_inode(vmf->vma->vm_file); 2334 2335 file_update_time(vmf->vma->vm_file); 2336 folio_lock(folio); 2337 if (folio->mapping != inode->i_mapping) { 2338 folio_unlock(folio); 2339 return VM_FAULT_NOPAGE; 2340 } 2341 2342 folio_wait_writeback(folio); 2343 return VM_FAULT_LOCKED; 2344 } 2345 2346 static const struct vm_operations_struct fuse_file_vm_ops = { 2347 .close = fuse_vma_close, 2348 .fault = filemap_fault, 2349 .map_pages = filemap_map_pages, 2350 .page_mkwrite = fuse_page_mkwrite, 2351 }; 2352 2353 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) 2354 { 2355 struct fuse_file *ff = file->private_data; 2356 struct fuse_conn *fc = ff->fm->fc; 2357 struct inode *inode = file_inode(file); 2358 int rc; 2359 2360 /* DAX mmap is superior to direct_io mmap */ 2361 if (FUSE_IS_DAX(inode)) 2362 return fuse_dax_mmap(file, vma); 2363 2364 /* 2365 * If inode is in passthrough io mode, because it has some file open 2366 * in passthrough mode, either mmap to backing file or fail mmap, 2367 * because mixing cached mmap and passthrough io mode is not allowed. 2368 */ 2369 if (fuse_file_passthrough(ff)) 2370 return fuse_passthrough_mmap(file, vma); 2371 else if (fuse_inode_backing(get_fuse_inode(inode))) 2372 return -ENODEV; 2373 2374 /* 2375 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, 2376 * as does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP. 2377 */ 2378 if (ff->open_flags & FOPEN_DIRECT_IO) { 2379 /* 2380 * Can't provide the coherency needed for MAP_SHARED 2381 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set. 2382 */ 2383 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap) 2384 return -ENODEV; 2385 2386 invalidate_inode_pages2(file->f_mapping); 2387 2388 if (!(vma->vm_flags & VM_MAYSHARE)) { 2389 /* MAP_PRIVATE */ 2390 return generic_file_mmap(file, vma); 2391 } 2392 2393 /* 2394 * First mmap of direct_io file enters caching inode io mode. 2395 * Also waits for parallel dio writers to go into serial mode 2396 * (exclusive instead of shared lock). 2397 * After first mmap, the inode stays in caching io mode until 2398 * the direct_io file release. 2399 */ 2400 rc = fuse_file_cached_io_open(inode, ff); 2401 if (rc) 2402 return rc; 2403 } 2404 2405 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2406 fuse_link_write_file(file); 2407 2408 file_accessed(file); 2409 vma->vm_ops = &fuse_file_vm_ops; 2410 return 0; 2411 } 2412 2413 static int convert_fuse_file_lock(struct fuse_conn *fc, 2414 const struct fuse_file_lock *ffl, 2415 struct file_lock *fl) 2416 { 2417 switch (ffl->type) { 2418 case F_UNLCK: 2419 break; 2420 2421 case F_RDLCK: 2422 case F_WRLCK: 2423 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || 2424 ffl->end < ffl->start) 2425 return -EIO; 2426 2427 fl->fl_start = ffl->start; 2428 fl->fl_end = ffl->end; 2429 2430 /* 2431 * Convert pid into init's pid namespace. The locks API will 2432 * translate it into the caller's pid namespace. 2433 */ 2434 rcu_read_lock(); 2435 fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns); 2436 rcu_read_unlock(); 2437 break; 2438 2439 default: 2440 return -EIO; 2441 } 2442 fl->c.flc_type = ffl->type; 2443 return 0; 2444 } 2445 2446 static void fuse_lk_fill(struct fuse_args *args, struct file *file, 2447 const struct file_lock *fl, int opcode, pid_t pid, 2448 int flock, struct fuse_lk_in *inarg) 2449 { 2450 struct inode *inode = file_inode(file); 2451 struct fuse_conn *fc = get_fuse_conn(inode); 2452 struct fuse_file *ff = file->private_data; 2453 2454 memset(inarg, 0, sizeof(*inarg)); 2455 inarg->fh = ff->fh; 2456 inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner); 2457 inarg->lk.start = fl->fl_start; 2458 inarg->lk.end = fl->fl_end; 2459 inarg->lk.type = fl->c.flc_type; 2460 inarg->lk.pid = pid; 2461 if (flock) 2462 inarg->lk_flags |= FUSE_LK_FLOCK; 2463 args->opcode = opcode; 2464 args->nodeid = get_node_id(inode); 2465 args->in_numargs = 1; 2466 args->in_args[0].size = sizeof(*inarg); 2467 args->in_args[0].value = inarg; 2468 } 2469 2470 static int fuse_getlk(struct file *file, struct file_lock *fl) 2471 { 2472 struct inode *inode = file_inode(file); 2473 struct fuse_mount *fm = get_fuse_mount(inode); 2474 FUSE_ARGS(args); 2475 struct fuse_lk_in inarg; 2476 struct fuse_lk_out outarg; 2477 int err; 2478 2479 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg); 2480 args.out_numargs = 1; 2481 args.out_args[0].size = sizeof(outarg); 2482 args.out_args[0].value = &outarg; 2483 err = fuse_simple_request(fm, &args); 2484 if (!err) 2485 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl); 2486 2487 return err; 2488 } 2489 2490 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) 2491 { 2492 struct inode *inode = file_inode(file); 2493 struct fuse_mount *fm = get_fuse_mount(inode); 2494 FUSE_ARGS(args); 2495 struct fuse_lk_in inarg; 2496 int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; 2497 struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL; 2498 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns); 2499 int err; 2500 2501 if (fl->fl_lmops && fl->fl_lmops->lm_grant) { 2502 /* NLM needs asynchronous locks, which we don't support yet */ 2503 return -ENOLCK; 2504 } 2505 2506 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); 2507 err = fuse_simple_request(fm, &args); 2508 2509 /* locking is restartable */ 2510 if (err == -EINTR) 2511 err = -ERESTARTSYS; 2512 2513 return err; 2514 } 2515 2516 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) 2517 { 2518 struct inode *inode = file_inode(file); 2519 struct fuse_conn *fc = get_fuse_conn(inode); 2520 int err; 2521 2522 if (cmd == F_CANCELLK) { 2523 err = 0; 2524 } else if (cmd == F_GETLK) { 2525 if (fc->no_lock) { 2526 posix_test_lock(file, fl); 2527 err = 0; 2528 } else 2529 err = fuse_getlk(file, fl); 2530 } else { 2531 if (fc->no_lock) 2532 err = posix_lock_file(file, fl, NULL); 2533 else 2534 err = fuse_setlk(file, fl, 0); 2535 } 2536 return err; 2537 } 2538 2539 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) 2540 { 2541 struct inode *inode = file_inode(file); 2542 struct fuse_conn *fc = get_fuse_conn(inode); 2543 int err; 2544 2545 if (fc->no_flock) { 2546 err = locks_lock_file_wait(file, fl); 2547 } else { 2548 struct fuse_file *ff = file->private_data; 2549 2550 /* emulate flock with POSIX locks */ 2551 ff->flock = true; 2552 err = fuse_setlk(file, fl, 1); 2553 } 2554 2555 return err; 2556 } 2557 2558 static sector_t fuse_bmap(struct address_space *mapping, sector_t block) 2559 { 2560 struct inode *inode = mapping->host; 2561 struct fuse_mount *fm = get_fuse_mount(inode); 2562 FUSE_ARGS(args); 2563 struct fuse_bmap_in inarg; 2564 struct fuse_bmap_out outarg; 2565 int err; 2566 2567 if (!inode->i_sb->s_bdev || fm->fc->no_bmap) 2568 return 0; 2569 2570 memset(&inarg, 0, sizeof(inarg)); 2571 inarg.block = block; 2572 inarg.blocksize = inode->i_sb->s_blocksize; 2573 args.opcode = FUSE_BMAP; 2574 args.nodeid = get_node_id(inode); 2575 args.in_numargs = 1; 2576 args.in_args[0].size = sizeof(inarg); 2577 args.in_args[0].value = &inarg; 2578 args.out_numargs = 1; 2579 args.out_args[0].size = sizeof(outarg); 2580 args.out_args[0].value = &outarg; 2581 err = fuse_simple_request(fm, &args); 2582 if (err == -ENOSYS) 2583 fm->fc->no_bmap = 1; 2584 2585 return err ? 0 : outarg.block; 2586 } 2587 2588 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence) 2589 { 2590 struct inode *inode = file->f_mapping->host; 2591 struct fuse_mount *fm = get_fuse_mount(inode); 2592 struct fuse_file *ff = file->private_data; 2593 FUSE_ARGS(args); 2594 struct fuse_lseek_in inarg = { 2595 .fh = ff->fh, 2596 .offset = offset, 2597 .whence = whence 2598 }; 2599 struct fuse_lseek_out outarg; 2600 int err; 2601 2602 if (fm->fc->no_lseek) 2603 goto fallback; 2604 2605 args.opcode = FUSE_LSEEK; 2606 args.nodeid = ff->nodeid; 2607 args.in_numargs = 1; 2608 args.in_args[0].size = sizeof(inarg); 2609 args.in_args[0].value = &inarg; 2610 args.out_numargs = 1; 2611 args.out_args[0].size = sizeof(outarg); 2612 args.out_args[0].value = &outarg; 2613 err = fuse_simple_request(fm, &args); 2614 if (err) { 2615 if (err == -ENOSYS) { 2616 fm->fc->no_lseek = 1; 2617 goto fallback; 2618 } 2619 return err; 2620 } 2621 2622 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes); 2623 2624 fallback: 2625 err = fuse_update_attributes(inode, file, STATX_SIZE); 2626 if (!err) 2627 return generic_file_llseek(file, offset, whence); 2628 else 2629 return err; 2630 } 2631 2632 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) 2633 { 2634 loff_t retval; 2635 struct inode *inode = file_inode(file); 2636 2637 switch (whence) { 2638 case SEEK_SET: 2639 case SEEK_CUR: 2640 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ 2641 retval = generic_file_llseek(file, offset, whence); 2642 break; 2643 case SEEK_END: 2644 inode_lock(inode); 2645 retval = fuse_update_attributes(inode, file, STATX_SIZE); 2646 if (!retval) 2647 retval = generic_file_llseek(file, offset, whence); 2648 inode_unlock(inode); 2649 break; 2650 case SEEK_HOLE: 2651 case SEEK_DATA: 2652 inode_lock(inode); 2653 retval = fuse_lseek(file, offset, whence); 2654 inode_unlock(inode); 2655 break; 2656 default: 2657 retval = -EINVAL; 2658 } 2659 2660 return retval; 2661 } 2662 2663 /* 2664 * All files which have been polled are linked to RB tree 2665 * fuse_conn->polled_files which is indexed by kh. Walk the tree and 2666 * find the matching one. 2667 */ 2668 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, 2669 struct rb_node **parent_out) 2670 { 2671 struct rb_node **link = &fc->polled_files.rb_node; 2672 struct rb_node *last = NULL; 2673 2674 while (*link) { 2675 struct fuse_file *ff; 2676 2677 last = *link; 2678 ff = rb_entry(last, struct fuse_file, polled_node); 2679 2680 if (kh < ff->kh) 2681 link = &last->rb_left; 2682 else if (kh > ff->kh) 2683 link = &last->rb_right; 2684 else 2685 return link; 2686 } 2687 2688 if (parent_out) 2689 *parent_out = last; 2690 return link; 2691 } 2692 2693 /* 2694 * The file is about to be polled. Make sure it's on the polled_files 2695 * RB tree. Note that files once added to the polled_files tree are 2696 * not removed before the file is released. This is because a file 2697 * polled once is likely to be polled again. 2698 */ 2699 static void fuse_register_polled_file(struct fuse_conn *fc, 2700 struct fuse_file *ff) 2701 { 2702 spin_lock(&fc->lock); 2703 if (RB_EMPTY_NODE(&ff->polled_node)) { 2704 struct rb_node **link, *parent; 2705 2706 link = fuse_find_polled_node(fc, ff->kh, &parent); 2707 BUG_ON(*link); 2708 rb_link_node(&ff->polled_node, parent, link); 2709 rb_insert_color(&ff->polled_node, &fc->polled_files); 2710 } 2711 spin_unlock(&fc->lock); 2712 } 2713 2714 __poll_t fuse_file_poll(struct file *file, poll_table *wait) 2715 { 2716 struct fuse_file *ff = file->private_data; 2717 struct fuse_mount *fm = ff->fm; 2718 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 2719 struct fuse_poll_out outarg; 2720 FUSE_ARGS(args); 2721 int err; 2722 2723 if (fm->fc->no_poll) 2724 return DEFAULT_POLLMASK; 2725 2726 poll_wait(file, &ff->poll_wait, wait); 2727 inarg.events = mangle_poll(poll_requested_events(wait)); 2728 2729 /* 2730 * Ask for notification iff there's someone waiting for it. 2731 * The client may ignore the flag and always notify. 2732 */ 2733 if (waitqueue_active(&ff->poll_wait)) { 2734 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; 2735 fuse_register_polled_file(fm->fc, ff); 2736 } 2737 2738 args.opcode = FUSE_POLL; 2739 args.nodeid = ff->nodeid; 2740 args.in_numargs = 1; 2741 args.in_args[0].size = sizeof(inarg); 2742 args.in_args[0].value = &inarg; 2743 args.out_numargs = 1; 2744 args.out_args[0].size = sizeof(outarg); 2745 args.out_args[0].value = &outarg; 2746 err = fuse_simple_request(fm, &args); 2747 2748 if (!err) 2749 return demangle_poll(outarg.revents); 2750 if (err == -ENOSYS) { 2751 fm->fc->no_poll = 1; 2752 return DEFAULT_POLLMASK; 2753 } 2754 return EPOLLERR; 2755 } 2756 EXPORT_SYMBOL_GPL(fuse_file_poll); 2757 2758 /* 2759 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and 2760 * wakes up the poll waiters. 2761 */ 2762 int fuse_notify_poll_wakeup(struct fuse_conn *fc, 2763 struct fuse_notify_poll_wakeup_out *outarg) 2764 { 2765 u64 kh = outarg->kh; 2766 struct rb_node **link; 2767 2768 spin_lock(&fc->lock); 2769 2770 link = fuse_find_polled_node(fc, kh, NULL); 2771 if (*link) { 2772 struct fuse_file *ff; 2773 2774 ff = rb_entry(*link, struct fuse_file, polled_node); 2775 wake_up_interruptible_sync(&ff->poll_wait); 2776 } 2777 2778 spin_unlock(&fc->lock); 2779 return 0; 2780 } 2781 2782 static void fuse_do_truncate(struct file *file) 2783 { 2784 struct inode *inode = file->f_mapping->host; 2785 struct iattr attr; 2786 2787 attr.ia_valid = ATTR_SIZE; 2788 attr.ia_size = i_size_read(inode); 2789 2790 attr.ia_file = file; 2791 attr.ia_valid |= ATTR_FILE; 2792 2793 fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file); 2794 } 2795 2796 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off) 2797 { 2798 return round_up(off, fc->max_pages << PAGE_SHIFT); 2799 } 2800 2801 static ssize_t 2802 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 2803 { 2804 DECLARE_COMPLETION_ONSTACK(wait); 2805 ssize_t ret = 0; 2806 struct file *file = iocb->ki_filp; 2807 struct fuse_file *ff = file->private_data; 2808 loff_t pos = 0; 2809 struct inode *inode; 2810 loff_t i_size; 2811 size_t count = iov_iter_count(iter), shortened = 0; 2812 loff_t offset = iocb->ki_pos; 2813 struct fuse_io_priv *io; 2814 2815 pos = offset; 2816 inode = file->f_mapping->host; 2817 i_size = i_size_read(inode); 2818 2819 if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) 2820 return 0; 2821 2822 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 2823 if (!io) 2824 return -ENOMEM; 2825 spin_lock_init(&io->lock); 2826 kref_init(&io->refcnt); 2827 io->reqs = 1; 2828 io->bytes = -1; 2829 io->size = 0; 2830 io->offset = offset; 2831 io->write = (iov_iter_rw(iter) == WRITE); 2832 io->err = 0; 2833 /* 2834 * By default, we want to optimize all I/Os with async request 2835 * submission to the client filesystem if supported. 2836 */ 2837 io->async = ff->fm->fc->async_dio; 2838 io->iocb = iocb; 2839 io->blocking = is_sync_kiocb(iocb); 2840 2841 /* optimization for short read */ 2842 if (io->async && !io->write && offset + count > i_size) { 2843 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset)); 2844 shortened = count - iov_iter_count(iter); 2845 count -= shortened; 2846 } 2847 2848 /* 2849 * We cannot asynchronously extend the size of a file. 2850 * In such case the aio will behave exactly like sync io. 2851 */ 2852 if ((offset + count > i_size) && io->write) 2853 io->blocking = true; 2854 2855 if (io->async && io->blocking) { 2856 /* 2857 * Additional reference to keep io around after 2858 * calling fuse_aio_complete() 2859 */ 2860 kref_get(&io->refcnt); 2861 io->done = &wait; 2862 } 2863 2864 if (iov_iter_rw(iter) == WRITE) { 2865 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); 2866 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 2867 } else { 2868 ret = __fuse_direct_read(io, iter, &pos); 2869 } 2870 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); 2871 2872 if (io->async) { 2873 bool blocking = io->blocking; 2874 2875 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 2876 2877 /* we have a non-extending, async request, so return */ 2878 if (!blocking) 2879 return -EIOCBQUEUED; 2880 2881 wait_for_completion(&wait); 2882 ret = fuse_get_res_by_io(io); 2883 } 2884 2885 kref_put(&io->refcnt, fuse_io_release); 2886 2887 if (iov_iter_rw(iter) == WRITE) { 2888 fuse_write_update_attr(inode, pos, ret); 2889 /* For extending writes we already hold exclusive lock */ 2890 if (ret < 0 && offset + count > i_size) 2891 fuse_do_truncate(file); 2892 } 2893 2894 return ret; 2895 } 2896 2897 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) 2898 { 2899 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX); 2900 2901 if (!err) 2902 fuse_sync_writes(inode); 2903 2904 return err; 2905 } 2906 2907 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2908 loff_t length) 2909 { 2910 struct fuse_file *ff = file->private_data; 2911 struct inode *inode = file_inode(file); 2912 struct fuse_inode *fi = get_fuse_inode(inode); 2913 struct fuse_mount *fm = ff->fm; 2914 FUSE_ARGS(args); 2915 struct fuse_fallocate_in inarg = { 2916 .fh = ff->fh, 2917 .offset = offset, 2918 .length = length, 2919 .mode = mode 2920 }; 2921 int err; 2922 bool block_faults = FUSE_IS_DAX(inode) && 2923 (!(mode & FALLOC_FL_KEEP_SIZE) || 2924 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))); 2925 2926 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 2927 FALLOC_FL_ZERO_RANGE)) 2928 return -EOPNOTSUPP; 2929 2930 if (fm->fc->no_fallocate) 2931 return -EOPNOTSUPP; 2932 2933 inode_lock(inode); 2934 if (block_faults) { 2935 filemap_invalidate_lock(inode->i_mapping); 2936 err = fuse_dax_break_layouts(inode, 0, -1); 2937 if (err) 2938 goto out; 2939 } 2940 2941 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) { 2942 loff_t endbyte = offset + length - 1; 2943 2944 err = fuse_writeback_range(inode, offset, endbyte); 2945 if (err) 2946 goto out; 2947 } 2948 2949 if (!(mode & FALLOC_FL_KEEP_SIZE) && 2950 offset + length > i_size_read(inode)) { 2951 err = inode_newsize_ok(inode, offset + length); 2952 if (err) 2953 goto out; 2954 } 2955 2956 err = file_modified(file); 2957 if (err) 2958 goto out; 2959 2960 if (!(mode & FALLOC_FL_KEEP_SIZE)) 2961 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 2962 2963 args.opcode = FUSE_FALLOCATE; 2964 args.nodeid = ff->nodeid; 2965 args.in_numargs = 1; 2966 args.in_args[0].size = sizeof(inarg); 2967 args.in_args[0].value = &inarg; 2968 err = fuse_simple_request(fm, &args); 2969 if (err == -ENOSYS) { 2970 fm->fc->no_fallocate = 1; 2971 err = -EOPNOTSUPP; 2972 } 2973 if (err) 2974 goto out; 2975 2976 /* we could have extended the file */ 2977 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 2978 if (fuse_write_update_attr(inode, offset + length, length)) 2979 file_update_time(file); 2980 } 2981 2982 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) 2983 truncate_pagecache_range(inode, offset, offset + length - 1); 2984 2985 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 2986 2987 out: 2988 if (!(mode & FALLOC_FL_KEEP_SIZE)) 2989 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 2990 2991 if (block_faults) 2992 filemap_invalidate_unlock(inode->i_mapping); 2993 2994 inode_unlock(inode); 2995 2996 fuse_flush_time_update(inode); 2997 2998 return err; 2999 } 3000 3001 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, 3002 struct file *file_out, loff_t pos_out, 3003 size_t len, unsigned int flags) 3004 { 3005 struct fuse_file *ff_in = file_in->private_data; 3006 struct fuse_file *ff_out = file_out->private_data; 3007 struct inode *inode_in = file_inode(file_in); 3008 struct inode *inode_out = file_inode(file_out); 3009 struct fuse_inode *fi_out = get_fuse_inode(inode_out); 3010 struct fuse_mount *fm = ff_in->fm; 3011 struct fuse_conn *fc = fm->fc; 3012 FUSE_ARGS(args); 3013 struct fuse_copy_file_range_in inarg = { 3014 .fh_in = ff_in->fh, 3015 .off_in = pos_in, 3016 .nodeid_out = ff_out->nodeid, 3017 .fh_out = ff_out->fh, 3018 .off_out = pos_out, 3019 .len = len, 3020 .flags = flags 3021 }; 3022 struct fuse_write_out outarg; 3023 struct fuse_copy_file_range_out outarg_64; 3024 u64 bytes_copied; 3025 ssize_t err; 3026 /* mark unstable when write-back is not used, and file_out gets 3027 * extended */ 3028 bool is_unstable = (!fc->writeback_cache) && 3029 ((pos_out + len) > inode_out->i_size); 3030 3031 if (fc->no_copy_file_range) 3032 return -EOPNOTSUPP; 3033 3034 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) 3035 return -EXDEV; 3036 3037 inode_lock(inode_in); 3038 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); 3039 inode_unlock(inode_in); 3040 if (err) 3041 return err; 3042 3043 inode_lock(inode_out); 3044 3045 err = file_modified(file_out); 3046 if (err) 3047 goto out; 3048 3049 /* 3050 * Write out dirty pages in the destination file before sending the COPY 3051 * request to userspace. After the request is completed, truncate off 3052 * pages (including partial ones) from the cache that have been copied, 3053 * since these contain stale data at that point. 3054 * 3055 * This should be mostly correct, but if the COPY writes to partial 3056 * pages (at the start or end) and the parts not covered by the COPY are 3057 * written through a memory map after calling fuse_writeback_range(), 3058 * then these partial page modifications will be lost on truncation. 3059 * 3060 * It is unlikely that someone would rely on such mixed style 3061 * modifications. Yet this does give less guarantees than if the 3062 * copying was performed with write(2). 3063 * 3064 * To fix this a mapping->invalidate_lock could be used to prevent new 3065 * faults while the copy is ongoing. 3066 */ 3067 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); 3068 if (err) 3069 goto out; 3070 3071 if (is_unstable) 3072 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); 3073 3074 args.opcode = FUSE_COPY_FILE_RANGE_64; 3075 args.nodeid = ff_in->nodeid; 3076 args.in_numargs = 1; 3077 args.in_args[0].size = sizeof(inarg); 3078 args.in_args[0].value = &inarg; 3079 args.out_numargs = 1; 3080 args.out_args[0].size = sizeof(outarg_64); 3081 args.out_args[0].value = &outarg_64; 3082 if (fc->no_copy_file_range_64) { 3083 fallback: 3084 /* Fall back to old op that can't handle large copy length */ 3085 args.opcode = FUSE_COPY_FILE_RANGE; 3086 args.out_args[0].size = sizeof(outarg); 3087 args.out_args[0].value = &outarg; 3088 inarg.len = len = min_t(size_t, len, UINT_MAX & PAGE_MASK); 3089 } 3090 err = fuse_simple_request(fm, &args); 3091 if (err == -ENOSYS) { 3092 if (fc->no_copy_file_range_64) { 3093 fc->no_copy_file_range = 1; 3094 err = -EOPNOTSUPP; 3095 } else { 3096 fc->no_copy_file_range_64 = 1; 3097 goto fallback; 3098 } 3099 } 3100 if (err) 3101 goto out; 3102 3103 bytes_copied = fc->no_copy_file_range_64 ? 3104 outarg.size : outarg_64.bytes_copied; 3105 3106 if (bytes_copied > len) { 3107 err = -EIO; 3108 goto out; 3109 } 3110 3111 truncate_inode_pages_range(inode_out->i_mapping, 3112 ALIGN_DOWN(pos_out, PAGE_SIZE), 3113 ALIGN(pos_out + bytes_copied, PAGE_SIZE) - 1); 3114 3115 file_update_time(file_out); 3116 fuse_write_update_attr(inode_out, pos_out + bytes_copied, bytes_copied); 3117 3118 err = bytes_copied; 3119 out: 3120 if (is_unstable) 3121 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); 3122 3123 inode_unlock(inode_out); 3124 file_accessed(file_in); 3125 3126 fuse_flush_time_update(inode_out); 3127 3128 return err; 3129 } 3130 3131 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off, 3132 struct file *dst_file, loff_t dst_off, 3133 size_t len, unsigned int flags) 3134 { 3135 ssize_t ret; 3136 3137 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off, 3138 len, flags); 3139 3140 if (ret == -EOPNOTSUPP || ret == -EXDEV) 3141 ret = splice_copy_file_range(src_file, src_off, dst_file, 3142 dst_off, len); 3143 return ret; 3144 } 3145 3146 static const struct file_operations fuse_file_operations = { 3147 .llseek = fuse_file_llseek, 3148 .read_iter = fuse_file_read_iter, 3149 .write_iter = fuse_file_write_iter, 3150 .mmap = fuse_file_mmap, 3151 .open = fuse_open, 3152 .flush = fuse_flush, 3153 .release = fuse_release, 3154 .fsync = fuse_fsync, 3155 .lock = fuse_file_lock, 3156 .get_unmapped_area = thp_get_unmapped_area, 3157 .flock = fuse_file_flock, 3158 .splice_read = fuse_splice_read, 3159 .splice_write = fuse_splice_write, 3160 .unlocked_ioctl = fuse_file_ioctl, 3161 .compat_ioctl = fuse_file_compat_ioctl, 3162 .poll = fuse_file_poll, 3163 .fallocate = fuse_file_fallocate, 3164 .copy_file_range = fuse_copy_file_range, 3165 }; 3166 3167 static const struct address_space_operations fuse_file_aops = { 3168 .read_folio = fuse_read_folio, 3169 .readahead = fuse_readahead, 3170 .writepages = fuse_writepages, 3171 .launder_folio = fuse_launder_folio, 3172 .dirty_folio = iomap_dirty_folio, 3173 .release_folio = iomap_release_folio, 3174 .invalidate_folio = iomap_invalidate_folio, 3175 .is_partially_uptodate = iomap_is_partially_uptodate, 3176 .migrate_folio = filemap_migrate_folio, 3177 .bmap = fuse_bmap, 3178 .direct_IO = fuse_direct_IO, 3179 }; 3180 3181 void fuse_init_file_inode(struct inode *inode, unsigned int flags) 3182 { 3183 struct fuse_inode *fi = get_fuse_inode(inode); 3184 struct fuse_conn *fc = get_fuse_conn(inode); 3185 3186 inode->i_fop = &fuse_file_operations; 3187 inode->i_data.a_ops = &fuse_file_aops; 3188 if (fc->writeback_cache) 3189 mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data); 3190 3191 INIT_LIST_HEAD(&fi->write_files); 3192 INIT_LIST_HEAD(&fi->queued_writes); 3193 fi->writectr = 0; 3194 fi->iocachectr = 0; 3195 init_waitqueue_head(&fi->page_waitq); 3196 init_waitqueue_head(&fi->direct_io_waitq); 3197 3198 if (IS_ENABLED(CONFIG_FUSE_DAX)) 3199 fuse_dax_inode_init(inode, flags); 3200 } 3201