1 /* 2 FUSE: Filesystem in Userspace 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> 4 5 This program can be distributed under the terms of the GNU GPL. 6 See the file COPYING. 7 */ 8 9 #include "fuse_i.h" 10 11 #include <linux/pagemap.h> 12 #include <linux/slab.h> 13 #include <linux/kernel.h> 14 #include <linux/sched.h> 15 #include <linux/sched/signal.h> 16 #include <linux/module.h> 17 #include <linux/swap.h> 18 #include <linux/falloc.h> 19 #include <linux/uio.h> 20 #include <linux/fs.h> 21 #include <linux/filelock.h> 22 #include <linux/splice.h> 23 #include <linux/task_io_accounting_ops.h> 24 25 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, 26 unsigned int open_flags, int opcode, 27 struct fuse_open_out *outargp) 28 { 29 struct fuse_open_in inarg; 30 FUSE_ARGS(args); 31 32 memset(&inarg, 0, sizeof(inarg)); 33 inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); 34 if (!fm->fc->atomic_o_trunc) 35 inarg.flags &= ~O_TRUNC; 36 37 if (fm->fc->handle_killpriv_v2 && 38 (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) { 39 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID; 40 } 41 42 args.opcode = opcode; 43 args.nodeid = nodeid; 44 args.in_numargs = 1; 45 args.in_args[0].size = sizeof(inarg); 46 args.in_args[0].value = &inarg; 47 args.out_numargs = 1; 48 args.out_args[0].size = sizeof(*outargp); 49 args.out_args[0].value = outargp; 50 51 return fuse_simple_request(fm, &args); 52 } 53 54 struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) 55 { 56 struct fuse_file *ff; 57 58 ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT); 59 if (unlikely(!ff)) 60 return NULL; 61 62 ff->fm = fm; 63 if (release) { 64 ff->args = kzalloc(sizeof(*ff->args), GFP_KERNEL_ACCOUNT); 65 if (!ff->args) { 66 kfree(ff); 67 return NULL; 68 } 69 } 70 71 INIT_LIST_HEAD(&ff->write_entry); 72 refcount_set(&ff->count, 1); 73 RB_CLEAR_NODE(&ff->polled_node); 74 init_waitqueue_head(&ff->poll_wait); 75 76 ff->kh = atomic64_inc_return(&fm->fc->khctr); 77 78 return ff; 79 } 80 81 void fuse_file_free(struct fuse_file *ff) 82 { 83 kfree(ff->args); 84 kfree(ff); 85 } 86 87 static struct fuse_file *fuse_file_get(struct fuse_file *ff) 88 { 89 refcount_inc(&ff->count); 90 return ff; 91 } 92 93 static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, 94 int error) 95 { 96 struct fuse_release_args *ra = container_of(args, typeof(*ra), args); 97 98 iput(ra->inode); 99 kfree(ra); 100 } 101 102 static void fuse_file_put(struct fuse_file *ff, bool sync) 103 { 104 if (refcount_dec_and_test(&ff->count)) { 105 struct fuse_release_args *ra = &ff->args->release_args; 106 struct fuse_args *args = (ra ? &ra->args : NULL); 107 108 if (ra && ra->inode) 109 fuse_file_io_release(ff, ra->inode); 110 111 if (!args) { 112 /* Do nothing when server does not implement 'open' */ 113 } else if (sync) { 114 fuse_simple_request(ff->fm, args); 115 fuse_release_end(ff->fm, args, 0); 116 } else { 117 args->end = fuse_release_end; 118 if (fuse_simple_background(ff->fm, args, 119 GFP_KERNEL | __GFP_NOFAIL)) 120 fuse_release_end(ff->fm, args, -ENOTCONN); 121 } 122 kfree(ff); 123 } 124 } 125 126 struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, 127 unsigned int open_flags, bool isdir) 128 { 129 struct fuse_conn *fc = fm->fc; 130 struct fuse_file *ff; 131 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 132 bool open = isdir ? !fc->no_opendir : !fc->no_open; 133 134 ff = fuse_file_alloc(fm, open); 135 if (!ff) 136 return ERR_PTR(-ENOMEM); 137 138 ff->fh = 0; 139 /* Default for no-open */ 140 ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); 141 if (open) { 142 /* Store outarg for fuse_finish_open() */ 143 struct fuse_open_out *outargp = &ff->args->open_outarg; 144 int err; 145 146 err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp); 147 if (!err) { 148 ff->fh = outargp->fh; 149 ff->open_flags = outargp->open_flags; 150 } else if (err != -ENOSYS) { 151 fuse_file_free(ff); 152 return ERR_PTR(err); 153 } else { 154 /* No release needed */ 155 kfree(ff->args); 156 ff->args = NULL; 157 if (isdir) 158 fc->no_opendir = 1; 159 else 160 fc->no_open = 1; 161 } 162 } 163 164 if (isdir) 165 ff->open_flags &= ~FOPEN_DIRECT_IO; 166 167 ff->nodeid = nodeid; 168 169 return ff; 170 } 171 172 int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file, 173 bool isdir) 174 { 175 struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir); 176 177 if (!IS_ERR(ff)) 178 file->private_data = ff; 179 180 return PTR_ERR_OR_ZERO(ff); 181 } 182 EXPORT_SYMBOL_GPL(fuse_do_open); 183 184 static void fuse_link_write_file(struct file *file) 185 { 186 struct inode *inode = file_inode(file); 187 struct fuse_inode *fi = get_fuse_inode(inode); 188 struct fuse_file *ff = file->private_data; 189 /* 190 * file may be written through mmap, so chain it onto the 191 * inodes's write_file list 192 */ 193 spin_lock(&fi->lock); 194 if (list_empty(&ff->write_entry)) 195 list_add(&ff->write_entry, &fi->write_files); 196 spin_unlock(&fi->lock); 197 } 198 199 int fuse_finish_open(struct inode *inode, struct file *file) 200 { 201 struct fuse_file *ff = file->private_data; 202 struct fuse_conn *fc = get_fuse_conn(inode); 203 int err; 204 205 err = fuse_file_io_open(file, inode); 206 if (err) 207 return err; 208 209 if (ff->open_flags & FOPEN_STREAM) 210 stream_open(inode, file); 211 else if (ff->open_flags & FOPEN_NONSEEKABLE) 212 nonseekable_open(inode, file); 213 214 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) 215 fuse_link_write_file(file); 216 217 return 0; 218 } 219 220 static void fuse_truncate_update_attr(struct inode *inode, struct file *file) 221 { 222 struct fuse_conn *fc = get_fuse_conn(inode); 223 struct fuse_inode *fi = get_fuse_inode(inode); 224 225 spin_lock(&fi->lock); 226 fi->attr_version = atomic64_inc_return(&fc->attr_version); 227 i_size_write(inode, 0); 228 spin_unlock(&fi->lock); 229 file_update_time(file); 230 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 231 } 232 233 static int fuse_open(struct inode *inode, struct file *file) 234 { 235 struct fuse_mount *fm = get_fuse_mount(inode); 236 struct fuse_inode *fi = get_fuse_inode(inode); 237 struct fuse_conn *fc = fm->fc; 238 struct fuse_file *ff; 239 int err; 240 bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; 241 bool is_wb_truncate = is_truncate && fc->writeback_cache; 242 bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); 243 244 if (fuse_is_bad(inode)) 245 return -EIO; 246 247 err = generic_file_open(inode, file); 248 if (err) 249 return err; 250 251 if (is_wb_truncate || dax_truncate) 252 inode_lock(inode); 253 254 if (dax_truncate) { 255 filemap_invalidate_lock(inode->i_mapping); 256 err = fuse_dax_break_layouts(inode, 0, 0); 257 if (err) 258 goto out_inode_unlock; 259 } 260 261 if (is_wb_truncate || dax_truncate) 262 fuse_set_nowrite(inode); 263 264 err = fuse_do_open(fm, get_node_id(inode), file, false); 265 if (!err) { 266 ff = file->private_data; 267 err = fuse_finish_open(inode, file); 268 if (err) 269 fuse_sync_release(fi, ff, file->f_flags); 270 else if (is_truncate) 271 fuse_truncate_update_attr(inode, file); 272 } 273 274 if (is_wb_truncate || dax_truncate) 275 fuse_release_nowrite(inode); 276 if (!err) { 277 if (is_truncate) 278 truncate_pagecache(inode, 0); 279 else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) 280 invalidate_inode_pages2(inode->i_mapping); 281 } 282 if (dax_truncate) 283 filemap_invalidate_unlock(inode->i_mapping); 284 out_inode_unlock: 285 if (is_wb_truncate || dax_truncate) 286 inode_unlock(inode); 287 288 return err; 289 } 290 291 static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, 292 unsigned int flags, int opcode, bool sync) 293 { 294 struct fuse_conn *fc = ff->fm->fc; 295 struct fuse_release_args *ra = &ff->args->release_args; 296 297 if (fuse_file_passthrough(ff)) 298 fuse_passthrough_release(ff, fuse_inode_backing(fi)); 299 300 /* Inode is NULL on error path of fuse_create_open() */ 301 if (likely(fi)) { 302 spin_lock(&fi->lock); 303 list_del(&ff->write_entry); 304 spin_unlock(&fi->lock); 305 } 306 spin_lock(&fc->lock); 307 if (!RB_EMPTY_NODE(&ff->polled_node)) 308 rb_erase(&ff->polled_node, &fc->polled_files); 309 spin_unlock(&fc->lock); 310 311 wake_up_interruptible_all(&ff->poll_wait); 312 313 if (!ra) 314 return; 315 316 /* ff->args was used for open outarg */ 317 memset(ff->args, 0, sizeof(*ff->args)); 318 ra->inarg.fh = ff->fh; 319 ra->inarg.flags = flags; 320 ra->args.in_numargs = 1; 321 ra->args.in_args[0].size = sizeof(struct fuse_release_in); 322 ra->args.in_args[0].value = &ra->inarg; 323 ra->args.opcode = opcode; 324 ra->args.nodeid = ff->nodeid; 325 ra->args.force = true; 326 ra->args.nocreds = true; 327 328 /* 329 * Hold inode until release is finished. 330 * From fuse_sync_release() the refcount is 1 and everything's 331 * synchronous, so we are fine with not doing igrab() here. 332 */ 333 ra->inode = sync ? NULL : igrab(&fi->inode); 334 } 335 336 void fuse_file_release(struct inode *inode, struct fuse_file *ff, 337 unsigned int open_flags, fl_owner_t id, bool isdir) 338 { 339 struct fuse_inode *fi = get_fuse_inode(inode); 340 struct fuse_release_args *ra = &ff->args->release_args; 341 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; 342 343 fuse_prepare_release(fi, ff, open_flags, opcode, false); 344 345 if (ra && ff->flock) { 346 ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; 347 ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); 348 } 349 350 /* 351 * Normally this will send the RELEASE request, however if 352 * some asynchronous READ or WRITE requests are outstanding, 353 * the sending will be delayed. 354 * 355 * Make the release synchronous if this is a fuseblk mount, 356 * synchronous RELEASE is allowed (and desirable) in this case 357 * because the server can be trusted not to screw up. 358 */ 359 fuse_file_put(ff, ff->fm->fc->destroy); 360 } 361 362 void fuse_release_common(struct file *file, bool isdir) 363 { 364 fuse_file_release(file_inode(file), file->private_data, file->f_flags, 365 (fl_owner_t) file, isdir); 366 } 367 368 static int fuse_release(struct inode *inode, struct file *file) 369 { 370 struct fuse_conn *fc = get_fuse_conn(inode); 371 372 /* 373 * Dirty pages might remain despite write_inode_now() call from 374 * fuse_flush() due to writes racing with the close. 375 */ 376 if (fc->writeback_cache) 377 write_inode_now(inode, 1); 378 379 fuse_release_common(file, false); 380 381 /* return value is ignored by VFS */ 382 return 0; 383 } 384 385 void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, 386 unsigned int flags) 387 { 388 WARN_ON(refcount_read(&ff->count) > 1); 389 fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); 390 fuse_file_put(ff, true); 391 } 392 EXPORT_SYMBOL_GPL(fuse_sync_release); 393 394 /* 395 * Scramble the ID space with XTEA, so that the value of the files_struct 396 * pointer is not exposed to userspace. 397 */ 398 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) 399 { 400 u32 *k = fc->scramble_key; 401 u64 v = (unsigned long) id; 402 u32 v0 = v; 403 u32 v1 = v >> 32; 404 u32 sum = 0; 405 int i; 406 407 for (i = 0; i < 32; i++) { 408 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); 409 sum += 0x9E3779B9; 410 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); 411 } 412 413 return (u64) v0 + ((u64) v1 << 32); 414 } 415 416 struct fuse_writepage_args { 417 struct fuse_io_args ia; 418 struct rb_node writepages_entry; 419 struct list_head queue_entry; 420 struct fuse_writepage_args *next; 421 struct inode *inode; 422 struct fuse_sync_bucket *bucket; 423 }; 424 425 static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, 426 pgoff_t idx_from, pgoff_t idx_to) 427 { 428 struct rb_node *n; 429 430 n = fi->writepages.rb_node; 431 432 while (n) { 433 struct fuse_writepage_args *wpa; 434 pgoff_t curr_index; 435 436 wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); 437 WARN_ON(get_fuse_inode(wpa->inode) != fi); 438 curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; 439 if (idx_from >= curr_index + wpa->ia.ap.num_pages) 440 n = n->rb_right; 441 else if (idx_to < curr_index) 442 n = n->rb_left; 443 else 444 return wpa; 445 } 446 return NULL; 447 } 448 449 /* 450 * Check if any page in a range is under writeback 451 * 452 * This is currently done by walking the list of writepage requests 453 * for the inode, which can be pretty inefficient. 454 */ 455 static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, 456 pgoff_t idx_to) 457 { 458 struct fuse_inode *fi = get_fuse_inode(inode); 459 bool found; 460 461 spin_lock(&fi->lock); 462 found = fuse_find_writeback(fi, idx_from, idx_to); 463 spin_unlock(&fi->lock); 464 465 return found; 466 } 467 468 static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) 469 { 470 return fuse_range_is_writeback(inode, index, index); 471 } 472 473 /* 474 * Wait for page writeback to be completed. 475 * 476 * Since fuse doesn't rely on the VM writeback tracking, this has to 477 * use some other means. 478 */ 479 static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) 480 { 481 struct fuse_inode *fi = get_fuse_inode(inode); 482 483 wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); 484 } 485 486 /* 487 * Wait for all pending writepages on the inode to finish. 488 * 489 * This is currently done by blocking further writes with FUSE_NOWRITE 490 * and waiting for all sent writes to complete. 491 * 492 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage 493 * could conflict with truncation. 494 */ 495 static void fuse_sync_writes(struct inode *inode) 496 { 497 fuse_set_nowrite(inode); 498 fuse_release_nowrite(inode); 499 } 500 501 static int fuse_flush(struct file *file, fl_owner_t id) 502 { 503 struct inode *inode = file_inode(file); 504 struct fuse_mount *fm = get_fuse_mount(inode); 505 struct fuse_file *ff = file->private_data; 506 struct fuse_flush_in inarg; 507 FUSE_ARGS(args); 508 int err; 509 510 if (fuse_is_bad(inode)) 511 return -EIO; 512 513 if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache) 514 return 0; 515 516 err = write_inode_now(inode, 1); 517 if (err) 518 return err; 519 520 inode_lock(inode); 521 fuse_sync_writes(inode); 522 inode_unlock(inode); 523 524 err = filemap_check_errors(file->f_mapping); 525 if (err) 526 return err; 527 528 err = 0; 529 if (fm->fc->no_flush) 530 goto inval_attr_out; 531 532 memset(&inarg, 0, sizeof(inarg)); 533 inarg.fh = ff->fh; 534 inarg.lock_owner = fuse_lock_owner_id(fm->fc, id); 535 args.opcode = FUSE_FLUSH; 536 args.nodeid = get_node_id(inode); 537 args.in_numargs = 1; 538 args.in_args[0].size = sizeof(inarg); 539 args.in_args[0].value = &inarg; 540 args.force = true; 541 542 err = fuse_simple_request(fm, &args); 543 if (err == -ENOSYS) { 544 fm->fc->no_flush = 1; 545 err = 0; 546 } 547 548 inval_attr_out: 549 /* 550 * In memory i_blocks is not maintained by fuse, if writeback cache is 551 * enabled, i_blocks from cached attr may not be accurate. 552 */ 553 if (!err && fm->fc->writeback_cache) 554 fuse_invalidate_attr_mask(inode, STATX_BLOCKS); 555 return err; 556 } 557 558 int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 559 int datasync, int opcode) 560 { 561 struct inode *inode = file->f_mapping->host; 562 struct fuse_mount *fm = get_fuse_mount(inode); 563 struct fuse_file *ff = file->private_data; 564 FUSE_ARGS(args); 565 struct fuse_fsync_in inarg; 566 567 memset(&inarg, 0, sizeof(inarg)); 568 inarg.fh = ff->fh; 569 inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0; 570 args.opcode = opcode; 571 args.nodeid = get_node_id(inode); 572 args.in_numargs = 1; 573 args.in_args[0].size = sizeof(inarg); 574 args.in_args[0].value = &inarg; 575 return fuse_simple_request(fm, &args); 576 } 577 578 static int fuse_fsync(struct file *file, loff_t start, loff_t end, 579 int datasync) 580 { 581 struct inode *inode = file->f_mapping->host; 582 struct fuse_conn *fc = get_fuse_conn(inode); 583 int err; 584 585 if (fuse_is_bad(inode)) 586 return -EIO; 587 588 inode_lock(inode); 589 590 /* 591 * Start writeback against all dirty pages of the inode, then 592 * wait for all outstanding writes, before sending the FSYNC 593 * request. 594 */ 595 err = file_write_and_wait_range(file, start, end); 596 if (err) 597 goto out; 598 599 fuse_sync_writes(inode); 600 601 /* 602 * Due to implementation of fuse writeback 603 * file_write_and_wait_range() does not catch errors. 604 * We have to do this directly after fuse_sync_writes() 605 */ 606 err = file_check_and_advance_wb_err(file); 607 if (err) 608 goto out; 609 610 err = sync_inode_metadata(inode, 1); 611 if (err) 612 goto out; 613 614 if (fc->no_fsync) 615 goto out; 616 617 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC); 618 if (err == -ENOSYS) { 619 fc->no_fsync = 1; 620 err = 0; 621 } 622 out: 623 inode_unlock(inode); 624 625 return err; 626 } 627 628 void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, 629 size_t count, int opcode) 630 { 631 struct fuse_file *ff = file->private_data; 632 struct fuse_args *args = &ia->ap.args; 633 634 ia->read.in.fh = ff->fh; 635 ia->read.in.offset = pos; 636 ia->read.in.size = count; 637 ia->read.in.flags = file->f_flags; 638 args->opcode = opcode; 639 args->nodeid = ff->nodeid; 640 args->in_numargs = 1; 641 args->in_args[0].size = sizeof(ia->read.in); 642 args->in_args[0].value = &ia->read.in; 643 args->out_argvar = true; 644 args->out_numargs = 1; 645 args->out_args[0].size = count; 646 } 647 648 static void fuse_release_user_pages(struct fuse_args_pages *ap, 649 bool should_dirty) 650 { 651 unsigned int i; 652 653 for (i = 0; i < ap->num_pages; i++) { 654 if (should_dirty) 655 set_page_dirty_lock(ap->pages[i]); 656 if (ap->args.is_pinned) 657 unpin_user_page(ap->pages[i]); 658 } 659 } 660 661 static void fuse_io_release(struct kref *kref) 662 { 663 kfree(container_of(kref, struct fuse_io_priv, refcnt)); 664 } 665 666 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io) 667 { 668 if (io->err) 669 return io->err; 670 671 if (io->bytes >= 0 && io->write) 672 return -EIO; 673 674 return io->bytes < 0 ? io->size : io->bytes; 675 } 676 677 /* 678 * In case of short read, the caller sets 'pos' to the position of 679 * actual end of fuse request in IO request. Otherwise, if bytes_requested 680 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1. 681 * 682 * An example: 683 * User requested DIO read of 64K. It was split into two 32K fuse requests, 684 * both submitted asynchronously. The first of them was ACKed by userspace as 685 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The 686 * second request was ACKed as short, e.g. only 1K was read, resulting in 687 * pos == 33K. 688 * 689 * Thus, when all fuse requests are completed, the minimal non-negative 'pos' 690 * will be equal to the length of the longest contiguous fragment of 691 * transferred data starting from the beginning of IO request. 692 */ 693 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) 694 { 695 int left; 696 697 spin_lock(&io->lock); 698 if (err) 699 io->err = io->err ? : err; 700 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes)) 701 io->bytes = pos; 702 703 left = --io->reqs; 704 if (!left && io->blocking) 705 complete(io->done); 706 spin_unlock(&io->lock); 707 708 if (!left && !io->blocking) { 709 ssize_t res = fuse_get_res_by_io(io); 710 711 if (res >= 0) { 712 struct inode *inode = file_inode(io->iocb->ki_filp); 713 struct fuse_conn *fc = get_fuse_conn(inode); 714 struct fuse_inode *fi = get_fuse_inode(inode); 715 716 spin_lock(&fi->lock); 717 fi->attr_version = atomic64_inc_return(&fc->attr_version); 718 spin_unlock(&fi->lock); 719 } 720 721 io->iocb->ki_complete(io->iocb, res); 722 } 723 724 kref_put(&io->refcnt, fuse_io_release); 725 } 726 727 static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, 728 unsigned int npages) 729 { 730 struct fuse_io_args *ia; 731 732 ia = kzalloc(sizeof(*ia), GFP_KERNEL); 733 if (ia) { 734 ia->io = io; 735 ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, 736 &ia->ap.descs); 737 if (!ia->ap.pages) { 738 kfree(ia); 739 ia = NULL; 740 } 741 } 742 return ia; 743 } 744 745 static void fuse_io_free(struct fuse_io_args *ia) 746 { 747 kfree(ia->ap.pages); 748 kfree(ia); 749 } 750 751 static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, 752 int err) 753 { 754 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); 755 struct fuse_io_priv *io = ia->io; 756 ssize_t pos = -1; 757 758 fuse_release_user_pages(&ia->ap, io->should_dirty); 759 760 if (err) { 761 /* Nothing */ 762 } else if (io->write) { 763 if (ia->write.out.size > ia->write.in.size) { 764 err = -EIO; 765 } else if (ia->write.in.size != ia->write.out.size) { 766 pos = ia->write.in.offset - io->offset + 767 ia->write.out.size; 768 } 769 } else { 770 u32 outsize = args->out_args[0].size; 771 772 if (ia->read.in.size != outsize) 773 pos = ia->read.in.offset - io->offset + outsize; 774 } 775 776 fuse_aio_complete(io, err, pos); 777 fuse_io_free(ia); 778 } 779 780 static ssize_t fuse_async_req_send(struct fuse_mount *fm, 781 struct fuse_io_args *ia, size_t num_bytes) 782 { 783 ssize_t err; 784 struct fuse_io_priv *io = ia->io; 785 786 spin_lock(&io->lock); 787 kref_get(&io->refcnt); 788 io->size += num_bytes; 789 io->reqs++; 790 spin_unlock(&io->lock); 791 792 ia->ap.args.end = fuse_aio_complete_req; 793 ia->ap.args.may_block = io->should_dirty; 794 err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL); 795 if (err) 796 fuse_aio_complete_req(fm, &ia->ap.args, err); 797 798 return num_bytes; 799 } 800 801 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count, 802 fl_owner_t owner) 803 { 804 struct file *file = ia->io->iocb->ki_filp; 805 struct fuse_file *ff = file->private_data; 806 struct fuse_mount *fm = ff->fm; 807 808 fuse_read_args_fill(ia, file, pos, count, FUSE_READ); 809 if (owner != NULL) { 810 ia->read.in.read_flags |= FUSE_READ_LOCKOWNER; 811 ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner); 812 } 813 814 if (ia->io->async) 815 return fuse_async_req_send(fm, ia, count); 816 817 return fuse_simple_request(fm, &ia->ap.args); 818 } 819 820 static void fuse_read_update_size(struct inode *inode, loff_t size, 821 u64 attr_ver) 822 { 823 struct fuse_conn *fc = get_fuse_conn(inode); 824 struct fuse_inode *fi = get_fuse_inode(inode); 825 826 spin_lock(&fi->lock); 827 if (attr_ver >= fi->attr_version && size < inode->i_size && 828 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { 829 fi->attr_version = atomic64_inc_return(&fc->attr_version); 830 i_size_write(inode, size); 831 } 832 spin_unlock(&fi->lock); 833 } 834 835 static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, 836 struct fuse_args_pages *ap) 837 { 838 struct fuse_conn *fc = get_fuse_conn(inode); 839 840 /* 841 * If writeback_cache is enabled, a short read means there's a hole in 842 * the file. Some data after the hole is in page cache, but has not 843 * reached the client fs yet. So the hole is not present there. 844 */ 845 if (!fc->writeback_cache) { 846 loff_t pos = page_offset(ap->pages[0]) + num_read; 847 fuse_read_update_size(inode, pos, attr_ver); 848 } 849 } 850 851 static int fuse_do_readpage(struct file *file, struct page *page) 852 { 853 struct inode *inode = page->mapping->host; 854 struct fuse_mount *fm = get_fuse_mount(inode); 855 loff_t pos = page_offset(page); 856 struct fuse_page_desc desc = { .length = PAGE_SIZE }; 857 struct fuse_io_args ia = { 858 .ap.args.page_zeroing = true, 859 .ap.args.out_pages = true, 860 .ap.num_pages = 1, 861 .ap.pages = &page, 862 .ap.descs = &desc, 863 }; 864 ssize_t res; 865 u64 attr_ver; 866 867 /* 868 * Page writeback can extend beyond the lifetime of the 869 * page-cache page, so make sure we read a properly synced 870 * page. 871 */ 872 fuse_wait_on_page_writeback(inode, page->index); 873 874 attr_ver = fuse_get_attr_version(fm->fc); 875 876 /* Don't overflow end offset */ 877 if (pos + (desc.length - 1) == LLONG_MAX) 878 desc.length--; 879 880 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ); 881 res = fuse_simple_request(fm, &ia.ap.args); 882 if (res < 0) 883 return res; 884 /* 885 * Short read means EOF. If file size is larger, truncate it 886 */ 887 if (res < desc.length) 888 fuse_short_read(inode, attr_ver, res, &ia.ap); 889 890 SetPageUptodate(page); 891 892 return 0; 893 } 894 895 static int fuse_read_folio(struct file *file, struct folio *folio) 896 { 897 struct page *page = &folio->page; 898 struct inode *inode = page->mapping->host; 899 int err; 900 901 err = -EIO; 902 if (fuse_is_bad(inode)) 903 goto out; 904 905 err = fuse_do_readpage(file, page); 906 fuse_invalidate_atime(inode); 907 out: 908 unlock_page(page); 909 return err; 910 } 911 912 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, 913 int err) 914 { 915 int i; 916 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); 917 struct fuse_args_pages *ap = &ia->ap; 918 size_t count = ia->read.in.size; 919 size_t num_read = args->out_args[0].size; 920 struct address_space *mapping = NULL; 921 922 for (i = 0; mapping == NULL && i < ap->num_pages; i++) 923 mapping = ap->pages[i]->mapping; 924 925 if (mapping) { 926 struct inode *inode = mapping->host; 927 928 /* 929 * Short read means EOF. If file size is larger, truncate it 930 */ 931 if (!err && num_read < count) 932 fuse_short_read(inode, ia->read.attr_ver, num_read, ap); 933 934 fuse_invalidate_atime(inode); 935 } 936 937 for (i = 0; i < ap->num_pages; i++) { 938 struct folio *folio = page_folio(ap->pages[i]); 939 940 folio_end_read(folio, !err); 941 folio_put(folio); 942 } 943 if (ia->ff) 944 fuse_file_put(ia->ff, false); 945 946 fuse_io_free(ia); 947 } 948 949 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) 950 { 951 struct fuse_file *ff = file->private_data; 952 struct fuse_mount *fm = ff->fm; 953 struct fuse_args_pages *ap = &ia->ap; 954 loff_t pos = page_offset(ap->pages[0]); 955 size_t count = ap->num_pages << PAGE_SHIFT; 956 ssize_t res; 957 int err; 958 959 ap->args.out_pages = true; 960 ap->args.page_zeroing = true; 961 ap->args.page_replace = true; 962 963 /* Don't overflow end offset */ 964 if (pos + (count - 1) == LLONG_MAX) { 965 count--; 966 ap->descs[ap->num_pages - 1].length--; 967 } 968 WARN_ON((loff_t) (pos + count) < 0); 969 970 fuse_read_args_fill(ia, file, pos, count, FUSE_READ); 971 ia->read.attr_ver = fuse_get_attr_version(fm->fc); 972 if (fm->fc->async_read) { 973 ia->ff = fuse_file_get(ff); 974 ap->args.end = fuse_readpages_end; 975 err = fuse_simple_background(fm, &ap->args, GFP_KERNEL); 976 if (!err) 977 return; 978 } else { 979 res = fuse_simple_request(fm, &ap->args); 980 err = res < 0 ? res : 0; 981 } 982 fuse_readpages_end(fm, &ap->args, err); 983 } 984 985 static void fuse_readahead(struct readahead_control *rac) 986 { 987 struct inode *inode = rac->mapping->host; 988 struct fuse_conn *fc = get_fuse_conn(inode); 989 unsigned int i, max_pages, nr_pages = 0; 990 991 if (fuse_is_bad(inode)) 992 return; 993 994 max_pages = min_t(unsigned int, fc->max_pages, 995 fc->max_read / PAGE_SIZE); 996 997 for (;;) { 998 struct fuse_io_args *ia; 999 struct fuse_args_pages *ap; 1000 1001 if (fc->num_background >= fc->congestion_threshold && 1002 rac->ra->async_size >= readahead_count(rac)) 1003 /* 1004 * Congested and only async pages left, so skip the 1005 * rest. 1006 */ 1007 break; 1008 1009 nr_pages = readahead_count(rac) - nr_pages; 1010 if (nr_pages > max_pages) 1011 nr_pages = max_pages; 1012 if (nr_pages == 0) 1013 break; 1014 ia = fuse_io_alloc(NULL, nr_pages); 1015 if (!ia) 1016 return; 1017 ap = &ia->ap; 1018 nr_pages = __readahead_batch(rac, ap->pages, nr_pages); 1019 for (i = 0; i < nr_pages; i++) { 1020 fuse_wait_on_page_writeback(inode, 1021 readahead_index(rac) + i); 1022 ap->descs[i].length = PAGE_SIZE; 1023 } 1024 ap->num_pages = nr_pages; 1025 fuse_send_readpages(ia, rac->file); 1026 } 1027 } 1028 1029 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to) 1030 { 1031 struct inode *inode = iocb->ki_filp->f_mapping->host; 1032 struct fuse_conn *fc = get_fuse_conn(inode); 1033 1034 /* 1035 * In auto invalidate mode, always update attributes on read. 1036 * Otherwise, only update if we attempt to read past EOF (to ensure 1037 * i_size is up to date). 1038 */ 1039 if (fc->auto_inval_data || 1040 (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) { 1041 int err; 1042 err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE); 1043 if (err) 1044 return err; 1045 } 1046 1047 return generic_file_read_iter(iocb, to); 1048 } 1049 1050 static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff, 1051 loff_t pos, size_t count) 1052 { 1053 struct fuse_args *args = &ia->ap.args; 1054 1055 ia->write.in.fh = ff->fh; 1056 ia->write.in.offset = pos; 1057 ia->write.in.size = count; 1058 args->opcode = FUSE_WRITE; 1059 args->nodeid = ff->nodeid; 1060 args->in_numargs = 2; 1061 if (ff->fm->fc->minor < 9) 1062 args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; 1063 else 1064 args->in_args[0].size = sizeof(ia->write.in); 1065 args->in_args[0].value = &ia->write.in; 1066 args->in_args[1].size = count; 1067 args->out_numargs = 1; 1068 args->out_args[0].size = sizeof(ia->write.out); 1069 args->out_args[0].value = &ia->write.out; 1070 } 1071 1072 static unsigned int fuse_write_flags(struct kiocb *iocb) 1073 { 1074 unsigned int flags = iocb->ki_filp->f_flags; 1075 1076 if (iocb_is_dsync(iocb)) 1077 flags |= O_DSYNC; 1078 if (iocb->ki_flags & IOCB_SYNC) 1079 flags |= O_SYNC; 1080 1081 return flags; 1082 } 1083 1084 static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos, 1085 size_t count, fl_owner_t owner) 1086 { 1087 struct kiocb *iocb = ia->io->iocb; 1088 struct file *file = iocb->ki_filp; 1089 struct fuse_file *ff = file->private_data; 1090 struct fuse_mount *fm = ff->fm; 1091 struct fuse_write_in *inarg = &ia->write.in; 1092 ssize_t err; 1093 1094 fuse_write_args_fill(ia, ff, pos, count); 1095 inarg->flags = fuse_write_flags(iocb); 1096 if (owner != NULL) { 1097 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 1098 inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner); 1099 } 1100 1101 if (ia->io->async) 1102 return fuse_async_req_send(fm, ia, count); 1103 1104 err = fuse_simple_request(fm, &ia->ap.args); 1105 if (!err && ia->write.out.size > count) 1106 err = -EIO; 1107 1108 return err ?: ia->write.out.size; 1109 } 1110 1111 bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written) 1112 { 1113 struct fuse_conn *fc = get_fuse_conn(inode); 1114 struct fuse_inode *fi = get_fuse_inode(inode); 1115 bool ret = false; 1116 1117 spin_lock(&fi->lock); 1118 fi->attr_version = atomic64_inc_return(&fc->attr_version); 1119 if (written > 0 && pos > inode->i_size) { 1120 i_size_write(inode, pos); 1121 ret = true; 1122 } 1123 spin_unlock(&fi->lock); 1124 1125 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 1126 1127 return ret; 1128 } 1129 1130 static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, 1131 struct kiocb *iocb, struct inode *inode, 1132 loff_t pos, size_t count) 1133 { 1134 struct fuse_args_pages *ap = &ia->ap; 1135 struct file *file = iocb->ki_filp; 1136 struct fuse_file *ff = file->private_data; 1137 struct fuse_mount *fm = ff->fm; 1138 unsigned int offset, i; 1139 bool short_write; 1140 int err; 1141 1142 for (i = 0; i < ap->num_pages; i++) 1143 fuse_wait_on_page_writeback(inode, ap->pages[i]->index); 1144 1145 fuse_write_args_fill(ia, ff, pos, count); 1146 ia->write.in.flags = fuse_write_flags(iocb); 1147 if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID)) 1148 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; 1149 1150 err = fuse_simple_request(fm, &ap->args); 1151 if (!err && ia->write.out.size > count) 1152 err = -EIO; 1153 1154 short_write = ia->write.out.size < count; 1155 offset = ap->descs[0].offset; 1156 count = ia->write.out.size; 1157 for (i = 0; i < ap->num_pages; i++) { 1158 struct page *page = ap->pages[i]; 1159 1160 if (err) { 1161 ClearPageUptodate(page); 1162 } else { 1163 if (count >= PAGE_SIZE - offset) 1164 count -= PAGE_SIZE - offset; 1165 else { 1166 if (short_write) 1167 ClearPageUptodate(page); 1168 count = 0; 1169 } 1170 offset = 0; 1171 } 1172 if (ia->write.page_locked && (i == ap->num_pages - 1)) 1173 unlock_page(page); 1174 put_page(page); 1175 } 1176 1177 return err; 1178 } 1179 1180 static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, 1181 struct address_space *mapping, 1182 struct iov_iter *ii, loff_t pos, 1183 unsigned int max_pages) 1184 { 1185 struct fuse_args_pages *ap = &ia->ap; 1186 struct fuse_conn *fc = get_fuse_conn(mapping->host); 1187 unsigned offset = pos & (PAGE_SIZE - 1); 1188 size_t count = 0; 1189 int err; 1190 1191 ap->args.in_pages = true; 1192 ap->descs[0].offset = offset; 1193 1194 do { 1195 size_t tmp; 1196 struct page *page; 1197 pgoff_t index = pos >> PAGE_SHIFT; 1198 size_t bytes = min_t(size_t, PAGE_SIZE - offset, 1199 iov_iter_count(ii)); 1200 1201 bytes = min_t(size_t, bytes, fc->max_write - count); 1202 1203 again: 1204 err = -EFAULT; 1205 if (fault_in_iov_iter_readable(ii, bytes)) 1206 break; 1207 1208 err = -ENOMEM; 1209 page = grab_cache_page_write_begin(mapping, index); 1210 if (!page) 1211 break; 1212 1213 if (mapping_writably_mapped(mapping)) 1214 flush_dcache_page(page); 1215 1216 tmp = copy_page_from_iter_atomic(page, offset, bytes, ii); 1217 flush_dcache_page(page); 1218 1219 if (!tmp) { 1220 unlock_page(page); 1221 put_page(page); 1222 goto again; 1223 } 1224 1225 err = 0; 1226 ap->pages[ap->num_pages] = page; 1227 ap->descs[ap->num_pages].length = tmp; 1228 ap->num_pages++; 1229 1230 count += tmp; 1231 pos += tmp; 1232 offset += tmp; 1233 if (offset == PAGE_SIZE) 1234 offset = 0; 1235 1236 /* If we copied full page, mark it uptodate */ 1237 if (tmp == PAGE_SIZE) 1238 SetPageUptodate(page); 1239 1240 if (PageUptodate(page)) { 1241 unlock_page(page); 1242 } else { 1243 ia->write.page_locked = true; 1244 break; 1245 } 1246 if (!fc->big_writes) 1247 break; 1248 } while (iov_iter_count(ii) && count < fc->max_write && 1249 ap->num_pages < max_pages && offset == 0); 1250 1251 return count > 0 ? count : err; 1252 } 1253 1254 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len, 1255 unsigned int max_pages) 1256 { 1257 return min_t(unsigned int, 1258 ((pos + len - 1) >> PAGE_SHIFT) - 1259 (pos >> PAGE_SHIFT) + 1, 1260 max_pages); 1261 } 1262 1263 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) 1264 { 1265 struct address_space *mapping = iocb->ki_filp->f_mapping; 1266 struct inode *inode = mapping->host; 1267 struct fuse_conn *fc = get_fuse_conn(inode); 1268 struct fuse_inode *fi = get_fuse_inode(inode); 1269 loff_t pos = iocb->ki_pos; 1270 int err = 0; 1271 ssize_t res = 0; 1272 1273 if (inode->i_size < pos + iov_iter_count(ii)) 1274 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 1275 1276 do { 1277 ssize_t count; 1278 struct fuse_io_args ia = {}; 1279 struct fuse_args_pages *ap = &ia.ap; 1280 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), 1281 fc->max_pages); 1282 1283 ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs); 1284 if (!ap->pages) { 1285 err = -ENOMEM; 1286 break; 1287 } 1288 1289 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages); 1290 if (count <= 0) { 1291 err = count; 1292 } else { 1293 err = fuse_send_write_pages(&ia, iocb, inode, 1294 pos, count); 1295 if (!err) { 1296 size_t num_written = ia.write.out.size; 1297 1298 res += num_written; 1299 pos += num_written; 1300 1301 /* break out of the loop on short write */ 1302 if (num_written != count) 1303 err = -EIO; 1304 } 1305 } 1306 kfree(ap->pages); 1307 } while (!err && iov_iter_count(ii)); 1308 1309 fuse_write_update_attr(inode, pos, res); 1310 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 1311 1312 if (!res) 1313 return err; 1314 iocb->ki_pos += res; 1315 return res; 1316 } 1317 1318 static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) 1319 { 1320 struct inode *inode = file_inode(iocb->ki_filp); 1321 1322 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); 1323 } 1324 1325 /* 1326 * @return true if an exclusive lock for direct IO writes is needed 1327 */ 1328 static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) 1329 { 1330 struct file *file = iocb->ki_filp; 1331 struct fuse_file *ff = file->private_data; 1332 struct inode *inode = file_inode(iocb->ki_filp); 1333 struct fuse_inode *fi = get_fuse_inode(inode); 1334 1335 /* Server side has to advise that it supports parallel dio writes. */ 1336 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) 1337 return true; 1338 1339 /* 1340 * Append will need to know the eventual EOF - always needs an 1341 * exclusive lock. 1342 */ 1343 if (iocb->ki_flags & IOCB_APPEND) 1344 return true; 1345 1346 /* shared locks are not allowed with parallel page cache IO */ 1347 if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) 1348 return false; 1349 1350 /* Parallel dio beyond EOF is not supported, at least for now. */ 1351 if (fuse_io_past_eof(iocb, from)) 1352 return true; 1353 1354 return false; 1355 } 1356 1357 static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, 1358 bool *exclusive) 1359 { 1360 struct inode *inode = file_inode(iocb->ki_filp); 1361 struct fuse_inode *fi = get_fuse_inode(inode); 1362 1363 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); 1364 if (*exclusive) { 1365 inode_lock(inode); 1366 } else { 1367 inode_lock_shared(inode); 1368 /* 1369 * New parallal dio allowed only if inode is not in caching 1370 * mode and denies new opens in caching mode. This check 1371 * should be performed only after taking shared inode lock. 1372 * Previous past eof check was without inode lock and might 1373 * have raced, so check it again. 1374 */ 1375 if (fuse_io_past_eof(iocb, from) || 1376 fuse_inode_uncached_io_start(fi, NULL) != 0) { 1377 inode_unlock_shared(inode); 1378 inode_lock(inode); 1379 *exclusive = true; 1380 } 1381 } 1382 } 1383 1384 static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) 1385 { 1386 struct inode *inode = file_inode(iocb->ki_filp); 1387 struct fuse_inode *fi = get_fuse_inode(inode); 1388 1389 if (exclusive) { 1390 inode_unlock(inode); 1391 } else { 1392 /* Allow opens in caching mode after last parallel dio end */ 1393 fuse_inode_uncached_io_end(fi); 1394 inode_unlock_shared(inode); 1395 } 1396 } 1397 1398 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) 1399 { 1400 struct file *file = iocb->ki_filp; 1401 struct address_space *mapping = file->f_mapping; 1402 ssize_t written = 0; 1403 struct inode *inode = mapping->host; 1404 ssize_t err, count; 1405 struct fuse_conn *fc = get_fuse_conn(inode); 1406 1407 if (fc->writeback_cache) { 1408 /* Update size (EOF optimization) and mode (SUID clearing) */ 1409 err = fuse_update_attributes(mapping->host, file, 1410 STATX_SIZE | STATX_MODE); 1411 if (err) 1412 return err; 1413 1414 if (fc->handle_killpriv_v2 && 1415 setattr_should_drop_suidgid(&nop_mnt_idmap, 1416 file_inode(file))) { 1417 goto writethrough; 1418 } 1419 1420 return generic_file_write_iter(iocb, from); 1421 } 1422 1423 writethrough: 1424 inode_lock(inode); 1425 1426 err = count = generic_write_checks(iocb, from); 1427 if (err <= 0) 1428 goto out; 1429 1430 task_io_account_write(count); 1431 1432 err = file_remove_privs(file); 1433 if (err) 1434 goto out; 1435 1436 err = file_update_time(file); 1437 if (err) 1438 goto out; 1439 1440 if (iocb->ki_flags & IOCB_DIRECT) { 1441 written = generic_file_direct_write(iocb, from); 1442 if (written < 0 || !iov_iter_count(from)) 1443 goto out; 1444 written = direct_write_fallback(iocb, from, written, 1445 fuse_perform_write(iocb, from)); 1446 } else { 1447 written = fuse_perform_write(iocb, from); 1448 } 1449 out: 1450 inode_unlock(inode); 1451 if (written > 0) 1452 written = generic_write_sync(iocb, written); 1453 1454 return written ? written : err; 1455 } 1456 1457 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) 1458 { 1459 return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset; 1460 } 1461 1462 static inline size_t fuse_get_frag_size(const struct iov_iter *ii, 1463 size_t max_size) 1464 { 1465 return min(iov_iter_single_seg_count(ii), max_size); 1466 } 1467 1468 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, 1469 size_t *nbytesp, int write, 1470 unsigned int max_pages) 1471 { 1472 size_t nbytes = 0; /* # bytes already packed in req */ 1473 ssize_t ret = 0; 1474 1475 /* Special case for kernel I/O: can copy directly into the buffer */ 1476 if (iov_iter_is_kvec(ii)) { 1477 unsigned long user_addr = fuse_get_user_addr(ii); 1478 size_t frag_size = fuse_get_frag_size(ii, *nbytesp); 1479 1480 if (write) 1481 ap->args.in_args[1].value = (void *) user_addr; 1482 else 1483 ap->args.out_args[0].value = (void *) user_addr; 1484 1485 iov_iter_advance(ii, frag_size); 1486 *nbytesp = frag_size; 1487 return 0; 1488 } 1489 1490 while (nbytes < *nbytesp && ap->num_pages < max_pages) { 1491 unsigned npages; 1492 size_t start; 1493 struct page **pt_pages; 1494 1495 pt_pages = &ap->pages[ap->num_pages]; 1496 ret = iov_iter_extract_pages(ii, &pt_pages, 1497 *nbytesp - nbytes, 1498 max_pages - ap->num_pages, 1499 0, &start); 1500 if (ret < 0) 1501 break; 1502 1503 nbytes += ret; 1504 1505 ret += start; 1506 npages = DIV_ROUND_UP(ret, PAGE_SIZE); 1507 1508 ap->descs[ap->num_pages].offset = start; 1509 fuse_page_descs_length_init(ap->descs, ap->num_pages, npages); 1510 1511 ap->num_pages += npages; 1512 ap->descs[ap->num_pages - 1].length -= 1513 (PAGE_SIZE - ret) & (PAGE_SIZE - 1); 1514 } 1515 1516 ap->args.is_pinned = iov_iter_extract_will_pin(ii); 1517 ap->args.user_pages = true; 1518 if (write) 1519 ap->args.in_pages = true; 1520 else 1521 ap->args.out_pages = true; 1522 1523 *nbytesp = nbytes; 1524 1525 return ret < 0 ? ret : 0; 1526 } 1527 1528 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, 1529 loff_t *ppos, int flags) 1530 { 1531 int write = flags & FUSE_DIO_WRITE; 1532 int cuse = flags & FUSE_DIO_CUSE; 1533 struct file *file = io->iocb->ki_filp; 1534 struct address_space *mapping = file->f_mapping; 1535 struct inode *inode = mapping->host; 1536 struct fuse_file *ff = file->private_data; 1537 struct fuse_conn *fc = ff->fm->fc; 1538 size_t nmax = write ? fc->max_write : fc->max_read; 1539 loff_t pos = *ppos; 1540 size_t count = iov_iter_count(iter); 1541 pgoff_t idx_from = pos >> PAGE_SHIFT; 1542 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT; 1543 ssize_t res = 0; 1544 int err = 0; 1545 struct fuse_io_args *ia; 1546 unsigned int max_pages; 1547 bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO; 1548 1549 max_pages = iov_iter_npages(iter, fc->max_pages); 1550 ia = fuse_io_alloc(io, max_pages); 1551 if (!ia) 1552 return -ENOMEM; 1553 1554 if (fopen_direct_io && fc->direct_io_allow_mmap) { 1555 res = filemap_write_and_wait_range(mapping, pos, pos + count - 1); 1556 if (res) { 1557 fuse_io_free(ia); 1558 return res; 1559 } 1560 } 1561 if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) { 1562 if (!write) 1563 inode_lock(inode); 1564 fuse_sync_writes(inode); 1565 if (!write) 1566 inode_unlock(inode); 1567 } 1568 1569 if (fopen_direct_io && write) { 1570 res = invalidate_inode_pages2_range(mapping, idx_from, idx_to); 1571 if (res) { 1572 fuse_io_free(ia); 1573 return res; 1574 } 1575 } 1576 1577 io->should_dirty = !write && user_backed_iter(iter); 1578 while (count) { 1579 ssize_t nres; 1580 fl_owner_t owner = current->files; 1581 size_t nbytes = min(count, nmax); 1582 1583 err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write, 1584 max_pages); 1585 if (err && !nbytes) 1586 break; 1587 1588 if (write) { 1589 if (!capable(CAP_FSETID)) 1590 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; 1591 1592 nres = fuse_send_write(ia, pos, nbytes, owner); 1593 } else { 1594 nres = fuse_send_read(ia, pos, nbytes, owner); 1595 } 1596 1597 if (!io->async || nres < 0) { 1598 fuse_release_user_pages(&ia->ap, io->should_dirty); 1599 fuse_io_free(ia); 1600 } 1601 ia = NULL; 1602 if (nres < 0) { 1603 iov_iter_revert(iter, nbytes); 1604 err = nres; 1605 break; 1606 } 1607 WARN_ON(nres > nbytes); 1608 1609 count -= nres; 1610 res += nres; 1611 pos += nres; 1612 if (nres != nbytes) { 1613 iov_iter_revert(iter, nbytes - nres); 1614 break; 1615 } 1616 if (count) { 1617 max_pages = iov_iter_npages(iter, fc->max_pages); 1618 ia = fuse_io_alloc(io, max_pages); 1619 if (!ia) 1620 break; 1621 } 1622 } 1623 if (ia) 1624 fuse_io_free(ia); 1625 if (res > 0) 1626 *ppos = pos; 1627 1628 return res > 0 ? res : err; 1629 } 1630 EXPORT_SYMBOL_GPL(fuse_direct_io); 1631 1632 static ssize_t __fuse_direct_read(struct fuse_io_priv *io, 1633 struct iov_iter *iter, 1634 loff_t *ppos) 1635 { 1636 ssize_t res; 1637 struct inode *inode = file_inode(io->iocb->ki_filp); 1638 1639 res = fuse_direct_io(io, iter, ppos, 0); 1640 1641 fuse_invalidate_atime(inode); 1642 1643 return res; 1644 } 1645 1646 static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter); 1647 1648 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) 1649 { 1650 ssize_t res; 1651 1652 if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { 1653 res = fuse_direct_IO(iocb, to); 1654 } else { 1655 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); 1656 1657 res = __fuse_direct_read(&io, to, &iocb->ki_pos); 1658 } 1659 1660 return res; 1661 } 1662 1663 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) 1664 { 1665 struct inode *inode = file_inode(iocb->ki_filp); 1666 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); 1667 ssize_t res; 1668 bool exclusive; 1669 1670 fuse_dio_lock(iocb, from, &exclusive); 1671 res = generic_write_checks(iocb, from); 1672 if (res > 0) { 1673 task_io_account_write(res); 1674 if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { 1675 res = fuse_direct_IO(iocb, from); 1676 } else { 1677 res = fuse_direct_io(&io, from, &iocb->ki_pos, 1678 FUSE_DIO_WRITE); 1679 fuse_write_update_attr(inode, iocb->ki_pos, res); 1680 } 1681 } 1682 fuse_dio_unlock(iocb, exclusive); 1683 1684 return res; 1685 } 1686 1687 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 1688 { 1689 struct file *file = iocb->ki_filp; 1690 struct fuse_file *ff = file->private_data; 1691 struct inode *inode = file_inode(file); 1692 1693 if (fuse_is_bad(inode)) 1694 return -EIO; 1695 1696 if (FUSE_IS_DAX(inode)) 1697 return fuse_dax_read_iter(iocb, to); 1698 1699 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1700 if (ff->open_flags & FOPEN_DIRECT_IO) 1701 return fuse_direct_read_iter(iocb, to); 1702 else if (fuse_file_passthrough(ff)) 1703 return fuse_passthrough_read_iter(iocb, to); 1704 else 1705 return fuse_cache_read_iter(iocb, to); 1706 } 1707 1708 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 1709 { 1710 struct file *file = iocb->ki_filp; 1711 struct fuse_file *ff = file->private_data; 1712 struct inode *inode = file_inode(file); 1713 1714 if (fuse_is_bad(inode)) 1715 return -EIO; 1716 1717 if (FUSE_IS_DAX(inode)) 1718 return fuse_dax_write_iter(iocb, from); 1719 1720 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1721 if (ff->open_flags & FOPEN_DIRECT_IO) 1722 return fuse_direct_write_iter(iocb, from); 1723 else if (fuse_file_passthrough(ff)) 1724 return fuse_passthrough_write_iter(iocb, from); 1725 else 1726 return fuse_cache_write_iter(iocb, from); 1727 } 1728 1729 static ssize_t fuse_splice_read(struct file *in, loff_t *ppos, 1730 struct pipe_inode_info *pipe, size_t len, 1731 unsigned int flags) 1732 { 1733 struct fuse_file *ff = in->private_data; 1734 1735 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1736 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO)) 1737 return fuse_passthrough_splice_read(in, ppos, pipe, len, flags); 1738 else 1739 return filemap_splice_read(in, ppos, pipe, len, flags); 1740 } 1741 1742 static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out, 1743 loff_t *ppos, size_t len, unsigned int flags) 1744 { 1745 struct fuse_file *ff = out->private_data; 1746 1747 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */ 1748 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO)) 1749 return fuse_passthrough_splice_write(pipe, out, ppos, len, flags); 1750 else 1751 return iter_file_splice_write(pipe, out, ppos, len, flags); 1752 } 1753 1754 static void fuse_writepage_free(struct fuse_writepage_args *wpa) 1755 { 1756 struct fuse_args_pages *ap = &wpa->ia.ap; 1757 int i; 1758 1759 if (wpa->bucket) 1760 fuse_sync_bucket_dec(wpa->bucket); 1761 1762 for (i = 0; i < ap->num_pages; i++) 1763 __free_page(ap->pages[i]); 1764 1765 if (wpa->ia.ff) 1766 fuse_file_put(wpa->ia.ff, false); 1767 1768 kfree(ap->pages); 1769 kfree(wpa); 1770 } 1771 1772 static void fuse_writepage_finish(struct fuse_mount *fm, 1773 struct fuse_writepage_args *wpa) 1774 { 1775 struct fuse_args_pages *ap = &wpa->ia.ap; 1776 struct inode *inode = wpa->inode; 1777 struct fuse_inode *fi = get_fuse_inode(inode); 1778 struct backing_dev_info *bdi = inode_to_bdi(inode); 1779 int i; 1780 1781 for (i = 0; i < ap->num_pages; i++) { 1782 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1783 dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); 1784 wb_writeout_inc(&bdi->wb); 1785 } 1786 wake_up(&fi->page_waitq); 1787 } 1788 1789 /* Called under fi->lock, may release and reacquire it */ 1790 static void fuse_send_writepage(struct fuse_mount *fm, 1791 struct fuse_writepage_args *wpa, loff_t size) 1792 __releases(fi->lock) 1793 __acquires(fi->lock) 1794 { 1795 struct fuse_writepage_args *aux, *next; 1796 struct fuse_inode *fi = get_fuse_inode(wpa->inode); 1797 struct fuse_write_in *inarg = &wpa->ia.write.in; 1798 struct fuse_args *args = &wpa->ia.ap.args; 1799 __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE; 1800 int err; 1801 1802 fi->writectr++; 1803 if (inarg->offset + data_size <= size) { 1804 inarg->size = data_size; 1805 } else if (inarg->offset < size) { 1806 inarg->size = size - inarg->offset; 1807 } else { 1808 /* Got truncated off completely */ 1809 goto out_free; 1810 } 1811 1812 args->in_args[1].size = inarg->size; 1813 args->force = true; 1814 args->nocreds = true; 1815 1816 err = fuse_simple_background(fm, args, GFP_ATOMIC); 1817 if (err == -ENOMEM) { 1818 spin_unlock(&fi->lock); 1819 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL); 1820 spin_lock(&fi->lock); 1821 } 1822 1823 /* Fails on broken connection only */ 1824 if (unlikely(err)) 1825 goto out_free; 1826 1827 return; 1828 1829 out_free: 1830 fi->writectr--; 1831 rb_erase(&wpa->writepages_entry, &fi->writepages); 1832 fuse_writepage_finish(fm, wpa); 1833 spin_unlock(&fi->lock); 1834 1835 /* After rb_erase() aux request list is private */ 1836 for (aux = wpa->next; aux; aux = next) { 1837 struct backing_dev_info *bdi = inode_to_bdi(aux->inode); 1838 1839 next = aux->next; 1840 aux->next = NULL; 1841 1842 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1843 dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP); 1844 wb_writeout_inc(&bdi->wb); 1845 fuse_writepage_free(aux); 1846 } 1847 1848 fuse_writepage_free(wpa); 1849 spin_lock(&fi->lock); 1850 } 1851 1852 /* 1853 * If fi->writectr is positive (no truncate or fsync going on) send 1854 * all queued writepage requests. 1855 * 1856 * Called with fi->lock 1857 */ 1858 void fuse_flush_writepages(struct inode *inode) 1859 __releases(fi->lock) 1860 __acquires(fi->lock) 1861 { 1862 struct fuse_mount *fm = get_fuse_mount(inode); 1863 struct fuse_inode *fi = get_fuse_inode(inode); 1864 loff_t crop = i_size_read(inode); 1865 struct fuse_writepage_args *wpa; 1866 1867 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { 1868 wpa = list_entry(fi->queued_writes.next, 1869 struct fuse_writepage_args, queue_entry); 1870 list_del_init(&wpa->queue_entry); 1871 fuse_send_writepage(fm, wpa, crop); 1872 } 1873 } 1874 1875 static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, 1876 struct fuse_writepage_args *wpa) 1877 { 1878 pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; 1879 pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; 1880 struct rb_node **p = &root->rb_node; 1881 struct rb_node *parent = NULL; 1882 1883 WARN_ON(!wpa->ia.ap.num_pages); 1884 while (*p) { 1885 struct fuse_writepage_args *curr; 1886 pgoff_t curr_index; 1887 1888 parent = *p; 1889 curr = rb_entry(parent, struct fuse_writepage_args, 1890 writepages_entry); 1891 WARN_ON(curr->inode != wpa->inode); 1892 curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; 1893 1894 if (idx_from >= curr_index + curr->ia.ap.num_pages) 1895 p = &(*p)->rb_right; 1896 else if (idx_to < curr_index) 1897 p = &(*p)->rb_left; 1898 else 1899 return curr; 1900 } 1901 1902 rb_link_node(&wpa->writepages_entry, parent, p); 1903 rb_insert_color(&wpa->writepages_entry, root); 1904 return NULL; 1905 } 1906 1907 static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) 1908 { 1909 WARN_ON(fuse_insert_writeback(root, wpa)); 1910 } 1911 1912 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, 1913 int error) 1914 { 1915 struct fuse_writepage_args *wpa = 1916 container_of(args, typeof(*wpa), ia.ap.args); 1917 struct inode *inode = wpa->inode; 1918 struct fuse_inode *fi = get_fuse_inode(inode); 1919 struct fuse_conn *fc = get_fuse_conn(inode); 1920 1921 mapping_set_error(inode->i_mapping, error); 1922 /* 1923 * A writeback finished and this might have updated mtime/ctime on 1924 * server making local mtime/ctime stale. Hence invalidate attrs. 1925 * Do this only if writeback_cache is not enabled. If writeback_cache 1926 * is enabled, we trust local ctime/mtime. 1927 */ 1928 if (!fc->writeback_cache) 1929 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY); 1930 spin_lock(&fi->lock); 1931 rb_erase(&wpa->writepages_entry, &fi->writepages); 1932 while (wpa->next) { 1933 struct fuse_mount *fm = get_fuse_mount(inode); 1934 struct fuse_write_in *inarg = &wpa->ia.write.in; 1935 struct fuse_writepage_args *next = wpa->next; 1936 1937 wpa->next = next->next; 1938 next->next = NULL; 1939 next->ia.ff = fuse_file_get(wpa->ia.ff); 1940 tree_insert(&fi->writepages, next); 1941 1942 /* 1943 * Skip fuse_flush_writepages() to make it easy to crop requests 1944 * based on primary request size. 1945 * 1946 * 1st case (trivial): there are no concurrent activities using 1947 * fuse_set/release_nowrite. Then we're on safe side because 1948 * fuse_flush_writepages() would call fuse_send_writepage() 1949 * anyway. 1950 * 1951 * 2nd case: someone called fuse_set_nowrite and it is waiting 1952 * now for completion of all in-flight requests. This happens 1953 * rarely and no more than once per page, so this should be 1954 * okay. 1955 * 1956 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle 1957 * of fuse_set_nowrite..fuse_release_nowrite section. The fact 1958 * that fuse_set_nowrite returned implies that all in-flight 1959 * requests were completed along with all of their secondary 1960 * requests. Further primary requests are blocked by negative 1961 * writectr. Hence there cannot be any in-flight requests and 1962 * no invocations of fuse_writepage_end() while we're in 1963 * fuse_set_nowrite..fuse_release_nowrite section. 1964 */ 1965 fuse_send_writepage(fm, next, inarg->offset + inarg->size); 1966 } 1967 fi->writectr--; 1968 fuse_writepage_finish(fm, wpa); 1969 spin_unlock(&fi->lock); 1970 fuse_writepage_free(wpa); 1971 } 1972 1973 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi) 1974 { 1975 struct fuse_file *ff; 1976 1977 spin_lock(&fi->lock); 1978 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file, 1979 write_entry); 1980 if (ff) 1981 fuse_file_get(ff); 1982 spin_unlock(&fi->lock); 1983 1984 return ff; 1985 } 1986 1987 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi) 1988 { 1989 struct fuse_file *ff = __fuse_write_file_get(fi); 1990 WARN_ON(!ff); 1991 return ff; 1992 } 1993 1994 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) 1995 { 1996 struct fuse_inode *fi = get_fuse_inode(inode); 1997 struct fuse_file *ff; 1998 int err; 1999 2000 /* 2001 * Inode is always written before the last reference is dropped and 2002 * hence this should not be reached from reclaim. 2003 * 2004 * Writing back the inode from reclaim can deadlock if the request 2005 * processing itself needs an allocation. Allocations triggering 2006 * reclaim while serving a request can't be prevented, because it can 2007 * involve any number of unrelated userspace processes. 2008 */ 2009 WARN_ON(wbc->for_reclaim); 2010 2011 ff = __fuse_write_file_get(fi); 2012 err = fuse_flush_times(inode, ff); 2013 if (ff) 2014 fuse_file_put(ff, false); 2015 2016 return err; 2017 } 2018 2019 static struct fuse_writepage_args *fuse_writepage_args_alloc(void) 2020 { 2021 struct fuse_writepage_args *wpa; 2022 struct fuse_args_pages *ap; 2023 2024 wpa = kzalloc(sizeof(*wpa), GFP_NOFS); 2025 if (wpa) { 2026 ap = &wpa->ia.ap; 2027 ap->num_pages = 0; 2028 ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs); 2029 if (!ap->pages) { 2030 kfree(wpa); 2031 wpa = NULL; 2032 } 2033 } 2034 return wpa; 2035 2036 } 2037 2038 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, 2039 struct fuse_writepage_args *wpa) 2040 { 2041 if (!fc->sync_fs) 2042 return; 2043 2044 rcu_read_lock(); 2045 /* Prevent resurrection of dead bucket in unlikely race with syncfs */ 2046 do { 2047 wpa->bucket = rcu_dereference(fc->curr_bucket); 2048 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count))); 2049 rcu_read_unlock(); 2050 } 2051 2052 static int fuse_writepage_locked(struct folio *folio) 2053 { 2054 struct address_space *mapping = folio->mapping; 2055 struct inode *inode = mapping->host; 2056 struct fuse_conn *fc = get_fuse_conn(inode); 2057 struct fuse_inode *fi = get_fuse_inode(inode); 2058 struct fuse_writepage_args *wpa; 2059 struct fuse_args_pages *ap; 2060 struct folio *tmp_folio; 2061 int error = -ENOMEM; 2062 2063 folio_start_writeback(folio); 2064 2065 wpa = fuse_writepage_args_alloc(); 2066 if (!wpa) 2067 goto err; 2068 ap = &wpa->ia.ap; 2069 2070 tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); 2071 if (!tmp_folio) 2072 goto err_free; 2073 2074 error = -EIO; 2075 wpa->ia.ff = fuse_write_file_get(fi); 2076 if (!wpa->ia.ff) 2077 goto err_nofile; 2078 2079 fuse_writepage_add_to_bucket(fc, wpa); 2080 fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0); 2081 2082 folio_copy(tmp_folio, folio); 2083 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; 2084 wpa->next = NULL; 2085 ap->args.in_pages = true; 2086 ap->num_pages = 1; 2087 ap->pages[0] = &tmp_folio->page; 2088 ap->descs[0].offset = 0; 2089 ap->descs[0].length = PAGE_SIZE; 2090 ap->args.end = fuse_writepage_end; 2091 wpa->inode = inode; 2092 2093 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); 2094 node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); 2095 2096 spin_lock(&fi->lock); 2097 tree_insert(&fi->writepages, wpa); 2098 list_add_tail(&wpa->queue_entry, &fi->queued_writes); 2099 fuse_flush_writepages(inode); 2100 spin_unlock(&fi->lock); 2101 2102 folio_end_writeback(folio); 2103 2104 return 0; 2105 2106 err_nofile: 2107 folio_put(tmp_folio); 2108 err_free: 2109 kfree(wpa); 2110 err: 2111 mapping_set_error(folio->mapping, error); 2112 folio_end_writeback(folio); 2113 return error; 2114 } 2115 2116 struct fuse_fill_wb_data { 2117 struct fuse_writepage_args *wpa; 2118 struct fuse_file *ff; 2119 struct inode *inode; 2120 struct page **orig_pages; 2121 unsigned int max_pages; 2122 }; 2123 2124 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) 2125 { 2126 struct fuse_args_pages *ap = &data->wpa->ia.ap; 2127 struct fuse_conn *fc = get_fuse_conn(data->inode); 2128 struct page **pages; 2129 struct fuse_page_desc *descs; 2130 unsigned int npages = min_t(unsigned int, 2131 max_t(unsigned int, data->max_pages * 2, 2132 FUSE_DEFAULT_MAX_PAGES_PER_REQ), 2133 fc->max_pages); 2134 WARN_ON(npages <= data->max_pages); 2135 2136 pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); 2137 if (!pages) 2138 return false; 2139 2140 memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages); 2141 memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages); 2142 kfree(ap->pages); 2143 ap->pages = pages; 2144 ap->descs = descs; 2145 data->max_pages = npages; 2146 2147 return true; 2148 } 2149 2150 static void fuse_writepages_send(struct fuse_fill_wb_data *data) 2151 { 2152 struct fuse_writepage_args *wpa = data->wpa; 2153 struct inode *inode = data->inode; 2154 struct fuse_inode *fi = get_fuse_inode(inode); 2155 int num_pages = wpa->ia.ap.num_pages; 2156 int i; 2157 2158 wpa->ia.ff = fuse_file_get(data->ff); 2159 spin_lock(&fi->lock); 2160 list_add_tail(&wpa->queue_entry, &fi->queued_writes); 2161 fuse_flush_writepages(inode); 2162 spin_unlock(&fi->lock); 2163 2164 for (i = 0; i < num_pages; i++) 2165 end_page_writeback(data->orig_pages[i]); 2166 } 2167 2168 /* 2169 * Check under fi->lock if the page is under writeback, and insert it onto the 2170 * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's 2171 * one already added for a page at this offset. If there's none, then insert 2172 * this new request onto the auxiliary list, otherwise reuse the existing one by 2173 * swapping the new temp page with the old one. 2174 */ 2175 static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, 2176 struct page *page) 2177 { 2178 struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); 2179 struct fuse_writepage_args *tmp; 2180 struct fuse_writepage_args *old_wpa; 2181 struct fuse_args_pages *new_ap = &new_wpa->ia.ap; 2182 2183 WARN_ON(new_ap->num_pages != 0); 2184 new_ap->num_pages = 1; 2185 2186 spin_lock(&fi->lock); 2187 old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); 2188 if (!old_wpa) { 2189 spin_unlock(&fi->lock); 2190 return true; 2191 } 2192 2193 for (tmp = old_wpa->next; tmp; tmp = tmp->next) { 2194 pgoff_t curr_index; 2195 2196 WARN_ON(tmp->inode != new_wpa->inode); 2197 curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT; 2198 if (curr_index == page->index) { 2199 WARN_ON(tmp->ia.ap.num_pages != 1); 2200 swap(tmp->ia.ap.pages[0], new_ap->pages[0]); 2201 break; 2202 } 2203 } 2204 2205 if (!tmp) { 2206 new_wpa->next = old_wpa->next; 2207 old_wpa->next = new_wpa; 2208 } 2209 2210 spin_unlock(&fi->lock); 2211 2212 if (tmp) { 2213 struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode); 2214 2215 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 2216 dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP); 2217 wb_writeout_inc(&bdi->wb); 2218 fuse_writepage_free(new_wpa); 2219 } 2220 2221 return false; 2222 } 2223 2224 static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, 2225 struct fuse_args_pages *ap, 2226 struct fuse_fill_wb_data *data) 2227 { 2228 WARN_ON(!ap->num_pages); 2229 2230 /* 2231 * Being under writeback is unlikely but possible. For example direct 2232 * read to an mmaped fuse file will set the page dirty twice; once when 2233 * the pages are faulted with get_user_pages(), and then after the read 2234 * completed. 2235 */ 2236 if (fuse_page_is_writeback(data->inode, page->index)) 2237 return true; 2238 2239 /* Reached max pages */ 2240 if (ap->num_pages == fc->max_pages) 2241 return true; 2242 2243 /* Reached max write bytes */ 2244 if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) 2245 return true; 2246 2247 /* Discontinuity */ 2248 if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) 2249 return true; 2250 2251 /* Need to grow the pages array? If so, did the expansion fail? */ 2252 if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) 2253 return true; 2254 2255 return false; 2256 } 2257 2258 static int fuse_writepages_fill(struct folio *folio, 2259 struct writeback_control *wbc, void *_data) 2260 { 2261 struct fuse_fill_wb_data *data = _data; 2262 struct fuse_writepage_args *wpa = data->wpa; 2263 struct fuse_args_pages *ap = &wpa->ia.ap; 2264 struct inode *inode = data->inode; 2265 struct fuse_inode *fi = get_fuse_inode(inode); 2266 struct fuse_conn *fc = get_fuse_conn(inode); 2267 struct page *tmp_page; 2268 int err; 2269 2270 if (!data->ff) { 2271 err = -EIO; 2272 data->ff = fuse_write_file_get(fi); 2273 if (!data->ff) 2274 goto out_unlock; 2275 } 2276 2277 if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { 2278 fuse_writepages_send(data); 2279 data->wpa = NULL; 2280 } 2281 2282 err = -ENOMEM; 2283 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 2284 if (!tmp_page) 2285 goto out_unlock; 2286 2287 /* 2288 * The page must not be redirtied until the writeout is completed 2289 * (i.e. userspace has sent a reply to the write request). Otherwise 2290 * there could be more than one temporary page instance for each real 2291 * page. 2292 * 2293 * This is ensured by holding the page lock in page_mkwrite() while 2294 * checking fuse_page_is_writeback(). We already hold the page lock 2295 * since clear_page_dirty_for_io() and keep it held until we add the 2296 * request to the fi->writepages list and increment ap->num_pages. 2297 * After this fuse_page_is_writeback() will indicate that the page is 2298 * under writeback, so we can release the page lock. 2299 */ 2300 if (data->wpa == NULL) { 2301 err = -ENOMEM; 2302 wpa = fuse_writepage_args_alloc(); 2303 if (!wpa) { 2304 __free_page(tmp_page); 2305 goto out_unlock; 2306 } 2307 fuse_writepage_add_to_bucket(fc, wpa); 2308 2309 data->max_pages = 1; 2310 2311 ap = &wpa->ia.ap; 2312 fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0); 2313 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; 2314 wpa->next = NULL; 2315 ap->args.in_pages = true; 2316 ap->args.end = fuse_writepage_end; 2317 ap->num_pages = 0; 2318 wpa->inode = inode; 2319 } 2320 folio_start_writeback(folio); 2321 2322 copy_highpage(tmp_page, &folio->page); 2323 ap->pages[ap->num_pages] = tmp_page; 2324 ap->descs[ap->num_pages].offset = 0; 2325 ap->descs[ap->num_pages].length = PAGE_SIZE; 2326 data->orig_pages[ap->num_pages] = &folio->page; 2327 2328 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); 2329 inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); 2330 2331 err = 0; 2332 if (data->wpa) { 2333 /* 2334 * Protected by fi->lock against concurrent access by 2335 * fuse_page_is_writeback(). 2336 */ 2337 spin_lock(&fi->lock); 2338 ap->num_pages++; 2339 spin_unlock(&fi->lock); 2340 } else if (fuse_writepage_add(wpa, &folio->page)) { 2341 data->wpa = wpa; 2342 } else { 2343 folio_end_writeback(folio); 2344 } 2345 out_unlock: 2346 folio_unlock(folio); 2347 2348 return err; 2349 } 2350 2351 static int fuse_writepages(struct address_space *mapping, 2352 struct writeback_control *wbc) 2353 { 2354 struct inode *inode = mapping->host; 2355 struct fuse_conn *fc = get_fuse_conn(inode); 2356 struct fuse_fill_wb_data data; 2357 int err; 2358 2359 err = -EIO; 2360 if (fuse_is_bad(inode)) 2361 goto out; 2362 2363 if (wbc->sync_mode == WB_SYNC_NONE && 2364 fc->num_background >= fc->congestion_threshold) 2365 return 0; 2366 2367 data.inode = inode; 2368 data.wpa = NULL; 2369 data.ff = NULL; 2370 2371 err = -ENOMEM; 2372 data.orig_pages = kcalloc(fc->max_pages, 2373 sizeof(struct page *), 2374 GFP_NOFS); 2375 if (!data.orig_pages) 2376 goto out; 2377 2378 err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); 2379 if (data.wpa) { 2380 WARN_ON(!data.wpa->ia.ap.num_pages); 2381 fuse_writepages_send(&data); 2382 } 2383 if (data.ff) 2384 fuse_file_put(data.ff, false); 2385 2386 kfree(data.orig_pages); 2387 out: 2388 return err; 2389 } 2390 2391 /* 2392 * It's worthy to make sure that space is reserved on disk for the write, 2393 * but how to implement it without killing performance need more thinking. 2394 */ 2395 static int fuse_write_begin(struct file *file, struct address_space *mapping, 2396 loff_t pos, unsigned len, struct page **pagep, void **fsdata) 2397 { 2398 pgoff_t index = pos >> PAGE_SHIFT; 2399 struct fuse_conn *fc = get_fuse_conn(file_inode(file)); 2400 struct page *page; 2401 loff_t fsize; 2402 int err = -ENOMEM; 2403 2404 WARN_ON(!fc->writeback_cache); 2405 2406 page = grab_cache_page_write_begin(mapping, index); 2407 if (!page) 2408 goto error; 2409 2410 fuse_wait_on_page_writeback(mapping->host, page->index); 2411 2412 if (PageUptodate(page) || len == PAGE_SIZE) 2413 goto success; 2414 /* 2415 * Check if the start this page comes after the end of file, in which 2416 * case the readpage can be optimized away. 2417 */ 2418 fsize = i_size_read(mapping->host); 2419 if (fsize <= (pos & PAGE_MASK)) { 2420 size_t off = pos & ~PAGE_MASK; 2421 if (off) 2422 zero_user_segment(page, 0, off); 2423 goto success; 2424 } 2425 err = fuse_do_readpage(file, page); 2426 if (err) 2427 goto cleanup; 2428 success: 2429 *pagep = page; 2430 return 0; 2431 2432 cleanup: 2433 unlock_page(page); 2434 put_page(page); 2435 error: 2436 return err; 2437 } 2438 2439 static int fuse_write_end(struct file *file, struct address_space *mapping, 2440 loff_t pos, unsigned len, unsigned copied, 2441 struct page *page, void *fsdata) 2442 { 2443 struct inode *inode = page->mapping->host; 2444 2445 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ 2446 if (!copied) 2447 goto unlock; 2448 2449 pos += copied; 2450 if (!PageUptodate(page)) { 2451 /* Zero any unwritten bytes at the end of the page */ 2452 size_t endoff = pos & ~PAGE_MASK; 2453 if (endoff) 2454 zero_user_segment(page, endoff, PAGE_SIZE); 2455 SetPageUptodate(page); 2456 } 2457 2458 if (pos > inode->i_size) 2459 i_size_write(inode, pos); 2460 2461 set_page_dirty(page); 2462 2463 unlock: 2464 unlock_page(page); 2465 put_page(page); 2466 2467 return copied; 2468 } 2469 2470 static int fuse_launder_folio(struct folio *folio) 2471 { 2472 int err = 0; 2473 if (folio_clear_dirty_for_io(folio)) { 2474 struct inode *inode = folio->mapping->host; 2475 2476 /* Serialize with pending writeback for the same page */ 2477 fuse_wait_on_page_writeback(inode, folio->index); 2478 err = fuse_writepage_locked(folio); 2479 if (!err) 2480 fuse_wait_on_page_writeback(inode, folio->index); 2481 } 2482 return err; 2483 } 2484 2485 /* 2486 * Write back dirty data/metadata now (there may not be any suitable 2487 * open files later for data) 2488 */ 2489 static void fuse_vma_close(struct vm_area_struct *vma) 2490 { 2491 int err; 2492 2493 err = write_inode_now(vma->vm_file->f_mapping->host, 1); 2494 mapping_set_error(vma->vm_file->f_mapping, err); 2495 } 2496 2497 /* 2498 * Wait for writeback against this page to complete before allowing it 2499 * to be marked dirty again, and hence written back again, possibly 2500 * before the previous writepage completed. 2501 * 2502 * Block here, instead of in ->writepage(), so that the userspace fs 2503 * can only block processes actually operating on the filesystem. 2504 * 2505 * Otherwise unprivileged userspace fs would be able to block 2506 * unrelated: 2507 * 2508 * - page migration 2509 * - sync(2) 2510 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER 2511 */ 2512 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf) 2513 { 2514 struct page *page = vmf->page; 2515 struct inode *inode = file_inode(vmf->vma->vm_file); 2516 2517 file_update_time(vmf->vma->vm_file); 2518 lock_page(page); 2519 if (page->mapping != inode->i_mapping) { 2520 unlock_page(page); 2521 return VM_FAULT_NOPAGE; 2522 } 2523 2524 fuse_wait_on_page_writeback(inode, page->index); 2525 return VM_FAULT_LOCKED; 2526 } 2527 2528 static const struct vm_operations_struct fuse_file_vm_ops = { 2529 .close = fuse_vma_close, 2530 .fault = filemap_fault, 2531 .map_pages = filemap_map_pages, 2532 .page_mkwrite = fuse_page_mkwrite, 2533 }; 2534 2535 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) 2536 { 2537 struct fuse_file *ff = file->private_data; 2538 struct fuse_conn *fc = ff->fm->fc; 2539 struct inode *inode = file_inode(file); 2540 int rc; 2541 2542 /* DAX mmap is superior to direct_io mmap */ 2543 if (FUSE_IS_DAX(inode)) 2544 return fuse_dax_mmap(file, vma); 2545 2546 /* 2547 * If inode is in passthrough io mode, because it has some file open 2548 * in passthrough mode, either mmap to backing file or fail mmap, 2549 * because mixing cached mmap and passthrough io mode is not allowed. 2550 */ 2551 if (fuse_file_passthrough(ff)) 2552 return fuse_passthrough_mmap(file, vma); 2553 else if (fuse_inode_backing(get_fuse_inode(inode))) 2554 return -ENODEV; 2555 2556 /* 2557 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, 2558 * as does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP. 2559 */ 2560 if (ff->open_flags & FOPEN_DIRECT_IO) { 2561 /* 2562 * Can't provide the coherency needed for MAP_SHARED 2563 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set. 2564 */ 2565 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap) 2566 return -ENODEV; 2567 2568 invalidate_inode_pages2(file->f_mapping); 2569 2570 if (!(vma->vm_flags & VM_MAYSHARE)) { 2571 /* MAP_PRIVATE */ 2572 return generic_file_mmap(file, vma); 2573 } 2574 2575 /* 2576 * First mmap of direct_io file enters caching inode io mode. 2577 * Also waits for parallel dio writers to go into serial mode 2578 * (exclusive instead of shared lock). 2579 * After first mmap, the inode stays in caching io mode until 2580 * the direct_io file release. 2581 */ 2582 rc = fuse_file_cached_io_open(inode, ff); 2583 if (rc) 2584 return rc; 2585 } 2586 2587 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2588 fuse_link_write_file(file); 2589 2590 file_accessed(file); 2591 vma->vm_ops = &fuse_file_vm_ops; 2592 return 0; 2593 } 2594 2595 static int convert_fuse_file_lock(struct fuse_conn *fc, 2596 const struct fuse_file_lock *ffl, 2597 struct file_lock *fl) 2598 { 2599 switch (ffl->type) { 2600 case F_UNLCK: 2601 break; 2602 2603 case F_RDLCK: 2604 case F_WRLCK: 2605 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || 2606 ffl->end < ffl->start) 2607 return -EIO; 2608 2609 fl->fl_start = ffl->start; 2610 fl->fl_end = ffl->end; 2611 2612 /* 2613 * Convert pid into init's pid namespace. The locks API will 2614 * translate it into the caller's pid namespace. 2615 */ 2616 rcu_read_lock(); 2617 fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns); 2618 rcu_read_unlock(); 2619 break; 2620 2621 default: 2622 return -EIO; 2623 } 2624 fl->c.flc_type = ffl->type; 2625 return 0; 2626 } 2627 2628 static void fuse_lk_fill(struct fuse_args *args, struct file *file, 2629 const struct file_lock *fl, int opcode, pid_t pid, 2630 int flock, struct fuse_lk_in *inarg) 2631 { 2632 struct inode *inode = file_inode(file); 2633 struct fuse_conn *fc = get_fuse_conn(inode); 2634 struct fuse_file *ff = file->private_data; 2635 2636 memset(inarg, 0, sizeof(*inarg)); 2637 inarg->fh = ff->fh; 2638 inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner); 2639 inarg->lk.start = fl->fl_start; 2640 inarg->lk.end = fl->fl_end; 2641 inarg->lk.type = fl->c.flc_type; 2642 inarg->lk.pid = pid; 2643 if (flock) 2644 inarg->lk_flags |= FUSE_LK_FLOCK; 2645 args->opcode = opcode; 2646 args->nodeid = get_node_id(inode); 2647 args->in_numargs = 1; 2648 args->in_args[0].size = sizeof(*inarg); 2649 args->in_args[0].value = inarg; 2650 } 2651 2652 static int fuse_getlk(struct file *file, struct file_lock *fl) 2653 { 2654 struct inode *inode = file_inode(file); 2655 struct fuse_mount *fm = get_fuse_mount(inode); 2656 FUSE_ARGS(args); 2657 struct fuse_lk_in inarg; 2658 struct fuse_lk_out outarg; 2659 int err; 2660 2661 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg); 2662 args.out_numargs = 1; 2663 args.out_args[0].size = sizeof(outarg); 2664 args.out_args[0].value = &outarg; 2665 err = fuse_simple_request(fm, &args); 2666 if (!err) 2667 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl); 2668 2669 return err; 2670 } 2671 2672 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) 2673 { 2674 struct inode *inode = file_inode(file); 2675 struct fuse_mount *fm = get_fuse_mount(inode); 2676 FUSE_ARGS(args); 2677 struct fuse_lk_in inarg; 2678 int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; 2679 struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL; 2680 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns); 2681 int err; 2682 2683 if (fl->fl_lmops && fl->fl_lmops->lm_grant) { 2684 /* NLM needs asynchronous locks, which we don't support yet */ 2685 return -ENOLCK; 2686 } 2687 2688 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); 2689 err = fuse_simple_request(fm, &args); 2690 2691 /* locking is restartable */ 2692 if (err == -EINTR) 2693 err = -ERESTARTSYS; 2694 2695 return err; 2696 } 2697 2698 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) 2699 { 2700 struct inode *inode = file_inode(file); 2701 struct fuse_conn *fc = get_fuse_conn(inode); 2702 int err; 2703 2704 if (cmd == F_CANCELLK) { 2705 err = 0; 2706 } else if (cmd == F_GETLK) { 2707 if (fc->no_lock) { 2708 posix_test_lock(file, fl); 2709 err = 0; 2710 } else 2711 err = fuse_getlk(file, fl); 2712 } else { 2713 if (fc->no_lock) 2714 err = posix_lock_file(file, fl, NULL); 2715 else 2716 err = fuse_setlk(file, fl, 0); 2717 } 2718 return err; 2719 } 2720 2721 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) 2722 { 2723 struct inode *inode = file_inode(file); 2724 struct fuse_conn *fc = get_fuse_conn(inode); 2725 int err; 2726 2727 if (fc->no_flock) { 2728 err = locks_lock_file_wait(file, fl); 2729 } else { 2730 struct fuse_file *ff = file->private_data; 2731 2732 /* emulate flock with POSIX locks */ 2733 ff->flock = true; 2734 err = fuse_setlk(file, fl, 1); 2735 } 2736 2737 return err; 2738 } 2739 2740 static sector_t fuse_bmap(struct address_space *mapping, sector_t block) 2741 { 2742 struct inode *inode = mapping->host; 2743 struct fuse_mount *fm = get_fuse_mount(inode); 2744 FUSE_ARGS(args); 2745 struct fuse_bmap_in inarg; 2746 struct fuse_bmap_out outarg; 2747 int err; 2748 2749 if (!inode->i_sb->s_bdev || fm->fc->no_bmap) 2750 return 0; 2751 2752 memset(&inarg, 0, sizeof(inarg)); 2753 inarg.block = block; 2754 inarg.blocksize = inode->i_sb->s_blocksize; 2755 args.opcode = FUSE_BMAP; 2756 args.nodeid = get_node_id(inode); 2757 args.in_numargs = 1; 2758 args.in_args[0].size = sizeof(inarg); 2759 args.in_args[0].value = &inarg; 2760 args.out_numargs = 1; 2761 args.out_args[0].size = sizeof(outarg); 2762 args.out_args[0].value = &outarg; 2763 err = fuse_simple_request(fm, &args); 2764 if (err == -ENOSYS) 2765 fm->fc->no_bmap = 1; 2766 2767 return err ? 0 : outarg.block; 2768 } 2769 2770 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence) 2771 { 2772 struct inode *inode = file->f_mapping->host; 2773 struct fuse_mount *fm = get_fuse_mount(inode); 2774 struct fuse_file *ff = file->private_data; 2775 FUSE_ARGS(args); 2776 struct fuse_lseek_in inarg = { 2777 .fh = ff->fh, 2778 .offset = offset, 2779 .whence = whence 2780 }; 2781 struct fuse_lseek_out outarg; 2782 int err; 2783 2784 if (fm->fc->no_lseek) 2785 goto fallback; 2786 2787 args.opcode = FUSE_LSEEK; 2788 args.nodeid = ff->nodeid; 2789 args.in_numargs = 1; 2790 args.in_args[0].size = sizeof(inarg); 2791 args.in_args[0].value = &inarg; 2792 args.out_numargs = 1; 2793 args.out_args[0].size = sizeof(outarg); 2794 args.out_args[0].value = &outarg; 2795 err = fuse_simple_request(fm, &args); 2796 if (err) { 2797 if (err == -ENOSYS) { 2798 fm->fc->no_lseek = 1; 2799 goto fallback; 2800 } 2801 return err; 2802 } 2803 2804 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes); 2805 2806 fallback: 2807 err = fuse_update_attributes(inode, file, STATX_SIZE); 2808 if (!err) 2809 return generic_file_llseek(file, offset, whence); 2810 else 2811 return err; 2812 } 2813 2814 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) 2815 { 2816 loff_t retval; 2817 struct inode *inode = file_inode(file); 2818 2819 switch (whence) { 2820 case SEEK_SET: 2821 case SEEK_CUR: 2822 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ 2823 retval = generic_file_llseek(file, offset, whence); 2824 break; 2825 case SEEK_END: 2826 inode_lock(inode); 2827 retval = fuse_update_attributes(inode, file, STATX_SIZE); 2828 if (!retval) 2829 retval = generic_file_llseek(file, offset, whence); 2830 inode_unlock(inode); 2831 break; 2832 case SEEK_HOLE: 2833 case SEEK_DATA: 2834 inode_lock(inode); 2835 retval = fuse_lseek(file, offset, whence); 2836 inode_unlock(inode); 2837 break; 2838 default: 2839 retval = -EINVAL; 2840 } 2841 2842 return retval; 2843 } 2844 2845 /* 2846 * All files which have been polled are linked to RB tree 2847 * fuse_conn->polled_files which is indexed by kh. Walk the tree and 2848 * find the matching one. 2849 */ 2850 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, 2851 struct rb_node **parent_out) 2852 { 2853 struct rb_node **link = &fc->polled_files.rb_node; 2854 struct rb_node *last = NULL; 2855 2856 while (*link) { 2857 struct fuse_file *ff; 2858 2859 last = *link; 2860 ff = rb_entry(last, struct fuse_file, polled_node); 2861 2862 if (kh < ff->kh) 2863 link = &last->rb_left; 2864 else if (kh > ff->kh) 2865 link = &last->rb_right; 2866 else 2867 return link; 2868 } 2869 2870 if (parent_out) 2871 *parent_out = last; 2872 return link; 2873 } 2874 2875 /* 2876 * The file is about to be polled. Make sure it's on the polled_files 2877 * RB tree. Note that files once added to the polled_files tree are 2878 * not removed before the file is released. This is because a file 2879 * polled once is likely to be polled again. 2880 */ 2881 static void fuse_register_polled_file(struct fuse_conn *fc, 2882 struct fuse_file *ff) 2883 { 2884 spin_lock(&fc->lock); 2885 if (RB_EMPTY_NODE(&ff->polled_node)) { 2886 struct rb_node **link, *parent; 2887 2888 link = fuse_find_polled_node(fc, ff->kh, &parent); 2889 BUG_ON(*link); 2890 rb_link_node(&ff->polled_node, parent, link); 2891 rb_insert_color(&ff->polled_node, &fc->polled_files); 2892 } 2893 spin_unlock(&fc->lock); 2894 } 2895 2896 __poll_t fuse_file_poll(struct file *file, poll_table *wait) 2897 { 2898 struct fuse_file *ff = file->private_data; 2899 struct fuse_mount *fm = ff->fm; 2900 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; 2901 struct fuse_poll_out outarg; 2902 FUSE_ARGS(args); 2903 int err; 2904 2905 if (fm->fc->no_poll) 2906 return DEFAULT_POLLMASK; 2907 2908 poll_wait(file, &ff->poll_wait, wait); 2909 inarg.events = mangle_poll(poll_requested_events(wait)); 2910 2911 /* 2912 * Ask for notification iff there's someone waiting for it. 2913 * The client may ignore the flag and always notify. 2914 */ 2915 if (waitqueue_active(&ff->poll_wait)) { 2916 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; 2917 fuse_register_polled_file(fm->fc, ff); 2918 } 2919 2920 args.opcode = FUSE_POLL; 2921 args.nodeid = ff->nodeid; 2922 args.in_numargs = 1; 2923 args.in_args[0].size = sizeof(inarg); 2924 args.in_args[0].value = &inarg; 2925 args.out_numargs = 1; 2926 args.out_args[0].size = sizeof(outarg); 2927 args.out_args[0].value = &outarg; 2928 err = fuse_simple_request(fm, &args); 2929 2930 if (!err) 2931 return demangle_poll(outarg.revents); 2932 if (err == -ENOSYS) { 2933 fm->fc->no_poll = 1; 2934 return DEFAULT_POLLMASK; 2935 } 2936 return EPOLLERR; 2937 } 2938 EXPORT_SYMBOL_GPL(fuse_file_poll); 2939 2940 /* 2941 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and 2942 * wakes up the poll waiters. 2943 */ 2944 int fuse_notify_poll_wakeup(struct fuse_conn *fc, 2945 struct fuse_notify_poll_wakeup_out *outarg) 2946 { 2947 u64 kh = outarg->kh; 2948 struct rb_node **link; 2949 2950 spin_lock(&fc->lock); 2951 2952 link = fuse_find_polled_node(fc, kh, NULL); 2953 if (*link) { 2954 struct fuse_file *ff; 2955 2956 ff = rb_entry(*link, struct fuse_file, polled_node); 2957 wake_up_interruptible_sync(&ff->poll_wait); 2958 } 2959 2960 spin_unlock(&fc->lock); 2961 return 0; 2962 } 2963 2964 static void fuse_do_truncate(struct file *file) 2965 { 2966 struct inode *inode = file->f_mapping->host; 2967 struct iattr attr; 2968 2969 attr.ia_valid = ATTR_SIZE; 2970 attr.ia_size = i_size_read(inode); 2971 2972 attr.ia_file = file; 2973 attr.ia_valid |= ATTR_FILE; 2974 2975 fuse_do_setattr(file_dentry(file), &attr, file); 2976 } 2977 2978 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off) 2979 { 2980 return round_up(off, fc->max_pages << PAGE_SHIFT); 2981 } 2982 2983 static ssize_t 2984 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 2985 { 2986 DECLARE_COMPLETION_ONSTACK(wait); 2987 ssize_t ret = 0; 2988 struct file *file = iocb->ki_filp; 2989 struct fuse_file *ff = file->private_data; 2990 loff_t pos = 0; 2991 struct inode *inode; 2992 loff_t i_size; 2993 size_t count = iov_iter_count(iter), shortened = 0; 2994 loff_t offset = iocb->ki_pos; 2995 struct fuse_io_priv *io; 2996 2997 pos = offset; 2998 inode = file->f_mapping->host; 2999 i_size = i_size_read(inode); 3000 3001 if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) 3002 return 0; 3003 3004 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 3005 if (!io) 3006 return -ENOMEM; 3007 spin_lock_init(&io->lock); 3008 kref_init(&io->refcnt); 3009 io->reqs = 1; 3010 io->bytes = -1; 3011 io->size = 0; 3012 io->offset = offset; 3013 io->write = (iov_iter_rw(iter) == WRITE); 3014 io->err = 0; 3015 /* 3016 * By default, we want to optimize all I/Os with async request 3017 * submission to the client filesystem if supported. 3018 */ 3019 io->async = ff->fm->fc->async_dio; 3020 io->iocb = iocb; 3021 io->blocking = is_sync_kiocb(iocb); 3022 3023 /* optimization for short read */ 3024 if (io->async && !io->write && offset + count > i_size) { 3025 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset)); 3026 shortened = count - iov_iter_count(iter); 3027 count -= shortened; 3028 } 3029 3030 /* 3031 * We cannot asynchronously extend the size of a file. 3032 * In such case the aio will behave exactly like sync io. 3033 */ 3034 if ((offset + count > i_size) && io->write) 3035 io->blocking = true; 3036 3037 if (io->async && io->blocking) { 3038 /* 3039 * Additional reference to keep io around after 3040 * calling fuse_aio_complete() 3041 */ 3042 kref_get(&io->refcnt); 3043 io->done = &wait; 3044 } 3045 3046 if (iov_iter_rw(iter) == WRITE) { 3047 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); 3048 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 3049 } else { 3050 ret = __fuse_direct_read(io, iter, &pos); 3051 } 3052 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); 3053 3054 if (io->async) { 3055 bool blocking = io->blocking; 3056 3057 fuse_aio_complete(io, ret < 0 ? ret : 0, -1); 3058 3059 /* we have a non-extending, async request, so return */ 3060 if (!blocking) 3061 return -EIOCBQUEUED; 3062 3063 wait_for_completion(&wait); 3064 ret = fuse_get_res_by_io(io); 3065 } 3066 3067 kref_put(&io->refcnt, fuse_io_release); 3068 3069 if (iov_iter_rw(iter) == WRITE) { 3070 fuse_write_update_attr(inode, pos, ret); 3071 /* For extending writes we already hold exclusive lock */ 3072 if (ret < 0 && offset + count > i_size) 3073 fuse_do_truncate(file); 3074 } 3075 3076 return ret; 3077 } 3078 3079 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) 3080 { 3081 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX); 3082 3083 if (!err) 3084 fuse_sync_writes(inode); 3085 3086 return err; 3087 } 3088 3089 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 3090 loff_t length) 3091 { 3092 struct fuse_file *ff = file->private_data; 3093 struct inode *inode = file_inode(file); 3094 struct fuse_inode *fi = get_fuse_inode(inode); 3095 struct fuse_mount *fm = ff->fm; 3096 FUSE_ARGS(args); 3097 struct fuse_fallocate_in inarg = { 3098 .fh = ff->fh, 3099 .offset = offset, 3100 .length = length, 3101 .mode = mode 3102 }; 3103 int err; 3104 bool block_faults = FUSE_IS_DAX(inode) && 3105 (!(mode & FALLOC_FL_KEEP_SIZE) || 3106 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))); 3107 3108 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 3109 FALLOC_FL_ZERO_RANGE)) 3110 return -EOPNOTSUPP; 3111 3112 if (fm->fc->no_fallocate) 3113 return -EOPNOTSUPP; 3114 3115 inode_lock(inode); 3116 if (block_faults) { 3117 filemap_invalidate_lock(inode->i_mapping); 3118 err = fuse_dax_break_layouts(inode, 0, 0); 3119 if (err) 3120 goto out; 3121 } 3122 3123 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) { 3124 loff_t endbyte = offset + length - 1; 3125 3126 err = fuse_writeback_range(inode, offset, endbyte); 3127 if (err) 3128 goto out; 3129 } 3130 3131 if (!(mode & FALLOC_FL_KEEP_SIZE) && 3132 offset + length > i_size_read(inode)) { 3133 err = inode_newsize_ok(inode, offset + length); 3134 if (err) 3135 goto out; 3136 } 3137 3138 err = file_modified(file); 3139 if (err) 3140 goto out; 3141 3142 if (!(mode & FALLOC_FL_KEEP_SIZE)) 3143 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 3144 3145 args.opcode = FUSE_FALLOCATE; 3146 args.nodeid = ff->nodeid; 3147 args.in_numargs = 1; 3148 args.in_args[0].size = sizeof(inarg); 3149 args.in_args[0].value = &inarg; 3150 err = fuse_simple_request(fm, &args); 3151 if (err == -ENOSYS) { 3152 fm->fc->no_fallocate = 1; 3153 err = -EOPNOTSUPP; 3154 } 3155 if (err) 3156 goto out; 3157 3158 /* we could have extended the file */ 3159 if (!(mode & FALLOC_FL_KEEP_SIZE)) { 3160 if (fuse_write_update_attr(inode, offset + length, length)) 3161 file_update_time(file); 3162 } 3163 3164 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) 3165 truncate_pagecache_range(inode, offset, offset + length - 1); 3166 3167 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); 3168 3169 out: 3170 if (!(mode & FALLOC_FL_KEEP_SIZE)) 3171 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 3172 3173 if (block_faults) 3174 filemap_invalidate_unlock(inode->i_mapping); 3175 3176 inode_unlock(inode); 3177 3178 fuse_flush_time_update(inode); 3179 3180 return err; 3181 } 3182 3183 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, 3184 struct file *file_out, loff_t pos_out, 3185 size_t len, unsigned int flags) 3186 { 3187 struct fuse_file *ff_in = file_in->private_data; 3188 struct fuse_file *ff_out = file_out->private_data; 3189 struct inode *inode_in = file_inode(file_in); 3190 struct inode *inode_out = file_inode(file_out); 3191 struct fuse_inode *fi_out = get_fuse_inode(inode_out); 3192 struct fuse_mount *fm = ff_in->fm; 3193 struct fuse_conn *fc = fm->fc; 3194 FUSE_ARGS(args); 3195 struct fuse_copy_file_range_in inarg = { 3196 .fh_in = ff_in->fh, 3197 .off_in = pos_in, 3198 .nodeid_out = ff_out->nodeid, 3199 .fh_out = ff_out->fh, 3200 .off_out = pos_out, 3201 .len = len, 3202 .flags = flags 3203 }; 3204 struct fuse_write_out outarg; 3205 ssize_t err; 3206 /* mark unstable when write-back is not used, and file_out gets 3207 * extended */ 3208 bool is_unstable = (!fc->writeback_cache) && 3209 ((pos_out + len) > inode_out->i_size); 3210 3211 if (fc->no_copy_file_range) 3212 return -EOPNOTSUPP; 3213 3214 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) 3215 return -EXDEV; 3216 3217 inode_lock(inode_in); 3218 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); 3219 inode_unlock(inode_in); 3220 if (err) 3221 return err; 3222 3223 inode_lock(inode_out); 3224 3225 err = file_modified(file_out); 3226 if (err) 3227 goto out; 3228 3229 /* 3230 * Write out dirty pages in the destination file before sending the COPY 3231 * request to userspace. After the request is completed, truncate off 3232 * pages (including partial ones) from the cache that have been copied, 3233 * since these contain stale data at that point. 3234 * 3235 * This should be mostly correct, but if the COPY writes to partial 3236 * pages (at the start or end) and the parts not covered by the COPY are 3237 * written through a memory map after calling fuse_writeback_range(), 3238 * then these partial page modifications will be lost on truncation. 3239 * 3240 * It is unlikely that someone would rely on such mixed style 3241 * modifications. Yet this does give less guarantees than if the 3242 * copying was performed with write(2). 3243 * 3244 * To fix this a mapping->invalidate_lock could be used to prevent new 3245 * faults while the copy is ongoing. 3246 */ 3247 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); 3248 if (err) 3249 goto out; 3250 3251 if (is_unstable) 3252 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); 3253 3254 args.opcode = FUSE_COPY_FILE_RANGE; 3255 args.nodeid = ff_in->nodeid; 3256 args.in_numargs = 1; 3257 args.in_args[0].size = sizeof(inarg); 3258 args.in_args[0].value = &inarg; 3259 args.out_numargs = 1; 3260 args.out_args[0].size = sizeof(outarg); 3261 args.out_args[0].value = &outarg; 3262 err = fuse_simple_request(fm, &args); 3263 if (err == -ENOSYS) { 3264 fc->no_copy_file_range = 1; 3265 err = -EOPNOTSUPP; 3266 } 3267 if (err) 3268 goto out; 3269 3270 truncate_inode_pages_range(inode_out->i_mapping, 3271 ALIGN_DOWN(pos_out, PAGE_SIZE), 3272 ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); 3273 3274 file_update_time(file_out); 3275 fuse_write_update_attr(inode_out, pos_out + outarg.size, outarg.size); 3276 3277 err = outarg.size; 3278 out: 3279 if (is_unstable) 3280 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); 3281 3282 inode_unlock(inode_out); 3283 file_accessed(file_in); 3284 3285 fuse_flush_time_update(inode_out); 3286 3287 return err; 3288 } 3289 3290 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off, 3291 struct file *dst_file, loff_t dst_off, 3292 size_t len, unsigned int flags) 3293 { 3294 ssize_t ret; 3295 3296 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off, 3297 len, flags); 3298 3299 if (ret == -EOPNOTSUPP || ret == -EXDEV) 3300 ret = splice_copy_file_range(src_file, src_off, dst_file, 3301 dst_off, len); 3302 return ret; 3303 } 3304 3305 static const struct file_operations fuse_file_operations = { 3306 .llseek = fuse_file_llseek, 3307 .read_iter = fuse_file_read_iter, 3308 .write_iter = fuse_file_write_iter, 3309 .mmap = fuse_file_mmap, 3310 .open = fuse_open, 3311 .flush = fuse_flush, 3312 .release = fuse_release, 3313 .fsync = fuse_fsync, 3314 .lock = fuse_file_lock, 3315 .get_unmapped_area = thp_get_unmapped_area, 3316 .flock = fuse_file_flock, 3317 .splice_read = fuse_splice_read, 3318 .splice_write = fuse_splice_write, 3319 .unlocked_ioctl = fuse_file_ioctl, 3320 .compat_ioctl = fuse_file_compat_ioctl, 3321 .poll = fuse_file_poll, 3322 .fallocate = fuse_file_fallocate, 3323 .copy_file_range = fuse_copy_file_range, 3324 }; 3325 3326 static const struct address_space_operations fuse_file_aops = { 3327 .read_folio = fuse_read_folio, 3328 .readahead = fuse_readahead, 3329 .writepages = fuse_writepages, 3330 .launder_folio = fuse_launder_folio, 3331 .dirty_folio = filemap_dirty_folio, 3332 .migrate_folio = filemap_migrate_folio, 3333 .bmap = fuse_bmap, 3334 .direct_IO = fuse_direct_IO, 3335 .write_begin = fuse_write_begin, 3336 .write_end = fuse_write_end, 3337 }; 3338 3339 void fuse_init_file_inode(struct inode *inode, unsigned int flags) 3340 { 3341 struct fuse_inode *fi = get_fuse_inode(inode); 3342 3343 inode->i_fop = &fuse_file_operations; 3344 inode->i_data.a_ops = &fuse_file_aops; 3345 3346 INIT_LIST_HEAD(&fi->write_files); 3347 INIT_LIST_HEAD(&fi->queued_writes); 3348 fi->writectr = 0; 3349 fi->iocachectr = 0; 3350 init_waitqueue_head(&fi->page_waitq); 3351 init_waitqueue_head(&fi->direct_io_waitq); 3352 fi->writepages = RB_ROOT; 3353 3354 if (IS_ENABLED(CONFIG_FUSE_DAX)) 3355 fuse_dax_inode_init(inode, flags); 3356 } 3357