1 /* 2 FUSE: Filesystem in Userspace 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> 4 5 This program can be distributed under the terms of the GNU GPL. 6 See the file COPYING. 7 */ 8 9 #include "dev_uring_i.h" 10 #include "fuse_i.h" 11 #include "fuse_dev_i.h" 12 13 #include <linux/init.h> 14 #include <linux/module.h> 15 #include <linux/poll.h> 16 #include <linux/sched/signal.h> 17 #include <linux/uio.h> 18 #include <linux/miscdevice.h> 19 #include <linux/pagemap.h> 20 #include <linux/file.h> 21 #include <linux/slab.h> 22 #include <linux/pipe_fs_i.h> 23 #include <linux/swap.h> 24 #include <linux/splice.h> 25 #include <linux/sched.h> 26 #include <linux/seq_file.h> 27 28 #include "fuse_trace.h" 29 30 MODULE_ALIAS_MISCDEV(FUSE_MINOR); 31 MODULE_ALIAS("devname:fuse"); 32 33 static struct kmem_cache *fuse_req_cachep; 34 35 const unsigned long fuse_timeout_timer_freq = 36 secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ); 37 38 bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list) 39 { 40 struct fuse_req *req; 41 42 req = list_first_entry_or_null(list, struct fuse_req, list); 43 if (!req) 44 return false; 45 return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout); 46 } 47 48 static bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing) 49 { 50 int i; 51 52 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) 53 if (fuse_request_expired(fc, &processing[i])) 54 return true; 55 56 return false; 57 } 58 59 /* 60 * Check if any requests aren't being completed by the time the request timeout 61 * elapses. To do so, we: 62 * - check the fiq pending list 63 * - check the bg queue 64 * - check the fpq io and processing lists 65 * 66 * To make this fast, we only check against the head request on each list since 67 * these are generally queued in order of creation time (eg newer requests get 68 * queued to the tail). We might miss a few edge cases (eg requests transitioning 69 * between lists, re-sent requests at the head of the pending list having a 70 * later creation time than other requests on that list, etc.) but that is fine 71 * since if the request never gets fulfilled, it will eventually be caught. 
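 * The scan itself runs from fuse_check_timeout() below, which re-arms itself
 * every fuse_timeout_timer_freq jiffies, so an expired request is normally
 * noticed within about one timer period of fc->timeout.req_timeout elapsing.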
72 */ 73 void fuse_check_timeout(struct work_struct *work) 74 { 75 struct delayed_work *dwork = to_delayed_work(work); 76 struct fuse_conn *fc = container_of(dwork, struct fuse_conn, 77 timeout.work); 78 struct fuse_iqueue *fiq = &fc->iq; 79 struct fuse_dev *fud; 80 struct fuse_pqueue *fpq; 81 bool expired = false; 82 83 if (!atomic_read(&fc->num_waiting)) 84 goto out; 85 86 spin_lock(&fiq->lock); 87 expired = fuse_request_expired(fc, &fiq->pending); 88 spin_unlock(&fiq->lock); 89 if (expired) 90 goto abort_conn; 91 92 spin_lock(&fc->bg_lock); 93 expired = fuse_request_expired(fc, &fc->bg_queue); 94 spin_unlock(&fc->bg_lock); 95 if (expired) 96 goto abort_conn; 97 98 spin_lock(&fc->lock); 99 if (!fc->connected) { 100 spin_unlock(&fc->lock); 101 return; 102 } 103 list_for_each_entry(fud, &fc->devices, entry) { 104 fpq = &fud->pq; 105 spin_lock(&fpq->lock); 106 if (fuse_request_expired(fc, &fpq->io) || 107 fuse_fpq_processing_expired(fc, fpq->processing)) { 108 spin_unlock(&fpq->lock); 109 spin_unlock(&fc->lock); 110 goto abort_conn; 111 } 112 113 spin_unlock(&fpq->lock); 114 } 115 spin_unlock(&fc->lock); 116 117 if (fuse_uring_request_expired(fc)) 118 goto abort_conn; 119 120 out: 121 queue_delayed_work(system_percpu_wq, &fc->timeout.work, 122 fuse_timeout_timer_freq); 123 return; 124 125 abort_conn: 126 fuse_abort_conn(fc); 127 } 128 129 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) 130 { 131 INIT_LIST_HEAD(&req->list); 132 INIT_LIST_HEAD(&req->intr_entry); 133 init_waitqueue_head(&req->waitq); 134 refcount_set(&req->count, 1); 135 __set_bit(FR_PENDING, &req->flags); 136 req->fm = fm; 137 req->create_time = jiffies; 138 } 139 140 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags) 141 { 142 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags); 143 if (req) 144 fuse_request_init(fm, req); 145 146 return req; 147 } 148 149 static void fuse_request_free(struct fuse_req *req) 150 { 151 kmem_cache_free(fuse_req_cachep, req); 152 } 153 154 static void __fuse_get_request(struct fuse_req *req) 155 { 156 refcount_inc(&req->count); 157 } 158 159 /* Must be called with > 1 refcount */ 160 static void __fuse_put_request(struct fuse_req *req) 161 { 162 refcount_dec(&req->count); 163 } 164 165 void fuse_set_initialized(struct fuse_conn *fc) 166 { 167 /* Make sure stores before this are seen on another CPU */ 168 smp_wmb(); 169 fc->initialized = 1; 170 } 171 172 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) 173 { 174 return !fc->initialized || (for_background && fc->blocked) || 175 (fc->io_uring && fc->connected && !fuse_uring_ready(fc)); 176 } 177 178 static void fuse_drop_waiting(struct fuse_conn *fc) 179 { 180 /* 181 * lockess check of fc->connected is okay, because atomic_dec_and_test() 182 * provides a memory barrier matched with the one in fuse_wait_aborted() 183 * to ensure no wake-up is missed. 
184 */ 185 if (atomic_dec_and_test(&fc->num_waiting) && 186 !READ_ONCE(fc->connected)) { 187 /* wake up aborters */ 188 wake_up_all(&fc->blocked_waitq); 189 } 190 } 191 192 static void fuse_put_request(struct fuse_req *req); 193 194 static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap, 195 struct fuse_mount *fm, 196 bool for_background) 197 { 198 struct fuse_conn *fc = fm->fc; 199 struct fuse_req *req; 200 bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP); 201 kuid_t fsuid; 202 kgid_t fsgid; 203 int err; 204 205 atomic_inc(&fc->num_waiting); 206 207 if (fuse_block_alloc(fc, for_background)) { 208 err = -EINTR; 209 if (wait_event_state_exclusive(fc->blocked_waitq, 210 !fuse_block_alloc(fc, for_background), 211 (TASK_KILLABLE | TASK_FREEZABLE))) 212 goto out; 213 } 214 /* Matches smp_wmb() in fuse_set_initialized() */ 215 smp_rmb(); 216 217 err = -ENOTCONN; 218 if (!fc->connected) 219 goto out; 220 221 err = -ECONNREFUSED; 222 if (fc->conn_error) 223 goto out; 224 225 req = fuse_request_alloc(fm, GFP_KERNEL); 226 err = -ENOMEM; 227 if (!req) { 228 if (for_background) 229 wake_up(&fc->blocked_waitq); 230 goto out; 231 } 232 233 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); 234 235 __set_bit(FR_WAITING, &req->flags); 236 if (for_background) 237 __set_bit(FR_BACKGROUND, &req->flags); 238 239 /* 240 * Keep the old behavior when idmappings support was not 241 * declared by a FUSE server. 242 * 243 * For those FUSE servers who support idmapped mounts, 244 * we send UID/GID only along with "inode creation" 245 * fuse requests, otherwise idmap == &invalid_mnt_idmap and 246 * req->in.h.{u,g}id will be equal to FUSE_INVALID_UIDGID. 247 */ 248 fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns); 249 fsgid = no_idmap ? 
current_fsgid() : mapped_fsgid(idmap, fc->user_ns); 250 req->in.h.uid = from_kuid(fc->user_ns, fsuid); 251 req->in.h.gid = from_kgid(fc->user_ns, fsgid); 252 253 if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) || 254 req->in.h.gid == ((gid_t)-1))) { 255 fuse_put_request(req); 256 return ERR_PTR(-EOVERFLOW); 257 } 258 259 return req; 260 261 out: 262 fuse_drop_waiting(fc); 263 return ERR_PTR(err); 264 } 265 266 static void fuse_put_request(struct fuse_req *req) 267 { 268 struct fuse_conn *fc = req->fm->fc; 269 270 if (refcount_dec_and_test(&req->count)) { 271 if (test_bit(FR_BACKGROUND, &req->flags)) { 272 /* 273 * We get here in the unlikely case that a background 274 * request was allocated but not sent 275 */ 276 spin_lock(&fc->bg_lock); 277 if (!fc->blocked) 278 wake_up(&fc->blocked_waitq); 279 spin_unlock(&fc->bg_lock); 280 } 281 282 if (test_bit(FR_WAITING, &req->flags)) { 283 __clear_bit(FR_WAITING, &req->flags); 284 fuse_drop_waiting(fc); 285 } 286 287 fuse_request_free(req); 288 } 289 } 290 291 unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args) 292 { 293 unsigned nbytes = 0; 294 unsigned i; 295 296 for (i = 0; i < numargs; i++) 297 nbytes += args[i].size; 298 299 return nbytes; 300 } 301 EXPORT_SYMBOL_GPL(fuse_len_args); 302 303 static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq) 304 { 305 fiq->reqctr += FUSE_REQ_ID_STEP; 306 return fiq->reqctr; 307 } 308 309 u64 fuse_get_unique(struct fuse_iqueue *fiq) 310 { 311 u64 ret; 312 313 spin_lock(&fiq->lock); 314 ret = fuse_get_unique_locked(fiq); 315 spin_unlock(&fiq->lock); 316 317 return ret; 318 } 319 EXPORT_SYMBOL_GPL(fuse_get_unique); 320 321 unsigned int fuse_req_hash(u64 unique) 322 { 323 return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); 324 } 325 EXPORT_SYMBOL_GPL(fuse_req_hash); 326 327 /* 328 * A new request is available, wake fiq->waitq 329 */ 330 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq) 331 __releases(fiq->lock) 332 { 333 wake_up(&fiq->waitq); 334 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); 335 spin_unlock(&fiq->lock); 336 } 337 338 void fuse_dev_queue_forget(struct fuse_iqueue *fiq, 339 struct fuse_forget_link *forget) 340 { 341 spin_lock(&fiq->lock); 342 if (fiq->connected) { 343 fiq->forget_list_tail->next = forget; 344 fiq->forget_list_tail = forget; 345 fuse_dev_wake_and_unlock(fiq); 346 } else { 347 kfree(forget); 348 spin_unlock(&fiq->lock); 349 } 350 } 351 352 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) 353 { 354 spin_lock(&fiq->lock); 355 if (list_empty(&req->intr_entry)) { 356 list_add_tail(&req->intr_entry, &fiq->interrupts); 357 /* 358 * Pairs with smp_mb() implied by test_and_set_bit() 359 * from fuse_request_end(). 
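 * If FR_FINISHED is already set by then, the request has completed and the
 * freshly queued interrupt entry is removed again instead of waking the
 * reader.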
360 */ 361 smp_mb(); 362 if (test_bit(FR_FINISHED, &req->flags)) { 363 list_del_init(&req->intr_entry); 364 spin_unlock(&fiq->lock); 365 } else { 366 fuse_dev_wake_and_unlock(fiq); 367 } 368 } else { 369 spin_unlock(&fiq->lock); 370 } 371 } 372 373 static inline void fuse_request_assign_unique_locked(struct fuse_iqueue *fiq, 374 struct fuse_req *req) 375 { 376 if (req->in.h.opcode != FUSE_NOTIFY_REPLY) 377 req->in.h.unique = fuse_get_unique_locked(fiq); 378 379 /* tracepoint captures in.h.unique and in.h.len */ 380 trace_fuse_request_send(req); 381 } 382 383 inline void fuse_request_assign_unique(struct fuse_iqueue *fiq, 384 struct fuse_req *req) 385 { 386 if (req->in.h.opcode != FUSE_NOTIFY_REPLY) 387 req->in.h.unique = fuse_get_unique(fiq); 388 389 /* tracepoint captures in.h.unique and in.h.len */ 390 trace_fuse_request_send(req); 391 } 392 EXPORT_SYMBOL_GPL(fuse_request_assign_unique); 393 394 static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req) 395 { 396 spin_lock(&fiq->lock); 397 if (fiq->connected) { 398 fuse_request_assign_unique_locked(fiq, req); 399 list_add_tail(&req->list, &fiq->pending); 400 fuse_dev_wake_and_unlock(fiq); 401 } else { 402 spin_unlock(&fiq->lock); 403 req->out.h.error = -ENOTCONN; 404 clear_bit(FR_PENDING, &req->flags); 405 fuse_request_end(req); 406 } 407 } 408 409 const struct fuse_iqueue_ops fuse_dev_fiq_ops = { 410 .send_forget = fuse_dev_queue_forget, 411 .send_interrupt = fuse_dev_queue_interrupt, 412 .send_req = fuse_dev_queue_req, 413 }; 414 EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops); 415 416 static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req) 417 { 418 req->in.h.len = sizeof(struct fuse_in_header) + 419 fuse_len_args(req->args->in_numargs, 420 (struct fuse_arg *) req->args->in_args); 421 fiq->ops->send_req(fiq, req); 422 } 423 424 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, 425 u64 nodeid, u64 nlookup) 426 { 427 struct fuse_iqueue *fiq = &fc->iq; 428 429 forget->forget_one.nodeid = nodeid; 430 forget->forget_one.nlookup = nlookup; 431 432 fiq->ops->send_forget(fiq, forget); 433 } 434 435 static void flush_bg_queue(struct fuse_conn *fc) 436 { 437 struct fuse_iqueue *fiq = &fc->iq; 438 439 while (fc->active_background < fc->max_background && 440 !list_empty(&fc->bg_queue)) { 441 struct fuse_req *req; 442 443 req = list_first_entry(&fc->bg_queue, struct fuse_req, list); 444 list_del(&req->list); 445 fc->active_background++; 446 fuse_send_one(fiq, req); 447 } 448 } 449 450 /* 451 * This function is called when a request is finished. Either a reply 452 * has arrived or it was aborted (and not yet sent) or some error 453 * occurred during communication with userspace, or the device file 454 * was closed. The requester thread is woken up (if still waiting), 455 * the 'end' callback is called if given, else the reference to the 456 * request is released 457 */ 458 void fuse_request_end(struct fuse_req *req) 459 { 460 struct fuse_mount *fm = req->fm; 461 struct fuse_conn *fc = fm->fc; 462 struct fuse_iqueue *fiq = &fc->iq; 463 464 if (test_and_set_bit(FR_FINISHED, &req->flags)) 465 goto put_request; 466 467 trace_fuse_request_end(req); 468 /* 469 * test_and_set_bit() implies smp_mb() between bit 470 * changing and below FR_INTERRUPTED check. Pairs with 471 * smp_mb() from queue_interrupt(). 
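 * Either this side observes FR_INTERRUPTED and deletes the interrupt entry
 * below, or the queueing side observes FR_FINISHED and drops the entry
 * itself.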
472 */ 473 if (test_bit(FR_INTERRUPTED, &req->flags)) { 474 spin_lock(&fiq->lock); 475 list_del_init(&req->intr_entry); 476 spin_unlock(&fiq->lock); 477 } 478 WARN_ON(test_bit(FR_PENDING, &req->flags)); 479 WARN_ON(test_bit(FR_SENT, &req->flags)); 480 if (test_bit(FR_BACKGROUND, &req->flags)) { 481 spin_lock(&fc->bg_lock); 482 clear_bit(FR_BACKGROUND, &req->flags); 483 if (fc->num_background == fc->max_background) { 484 fc->blocked = 0; 485 wake_up(&fc->blocked_waitq); 486 } else if (!fc->blocked) { 487 /* 488 * Wake up next waiter, if any. It's okay to use 489 * waitqueue_active(), as we've already synced up 490 * fc->blocked with waiters with the wake_up() call 491 * above. 492 */ 493 if (waitqueue_active(&fc->blocked_waitq)) 494 wake_up(&fc->blocked_waitq); 495 } 496 497 fc->num_background--; 498 fc->active_background--; 499 flush_bg_queue(fc); 500 spin_unlock(&fc->bg_lock); 501 } else { 502 /* Wake up waiter sleeping in request_wait_answer() */ 503 wake_up(&req->waitq); 504 } 505 506 if (test_bit(FR_ASYNC, &req->flags)) 507 req->args->end(fm, req->args, req->out.h.error); 508 put_request: 509 fuse_put_request(req); 510 } 511 EXPORT_SYMBOL_GPL(fuse_request_end); 512 513 static int queue_interrupt(struct fuse_req *req) 514 { 515 struct fuse_iqueue *fiq = &req->fm->fc->iq; 516 517 /* Check for we've sent request to interrupt this req */ 518 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) 519 return -EINVAL; 520 521 fiq->ops->send_interrupt(fiq, req); 522 523 return 0; 524 } 525 526 bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock) 527 { 528 spin_lock(lock); 529 if (test_bit(FR_PENDING, &req->flags)) { 530 /* 531 * FR_PENDING does not get cleared as the request will end 532 * up in destruction anyway. 533 */ 534 list_del(&req->list); 535 spin_unlock(lock); 536 __fuse_put_request(req); 537 req->out.h.error = -EINTR; 538 return true; 539 } 540 spin_unlock(lock); 541 return false; 542 } 543 544 static void request_wait_answer(struct fuse_req *req) 545 { 546 struct fuse_conn *fc = req->fm->fc; 547 struct fuse_iqueue *fiq = &fc->iq; 548 int err; 549 550 if (!fc->no_interrupt) { 551 /* Any signal may interrupt this */ 552 err = wait_event_interruptible(req->waitq, 553 test_bit(FR_FINISHED, &req->flags)); 554 if (!err) 555 return; 556 557 set_bit(FR_INTERRUPTED, &req->flags); 558 /* matches barrier in fuse_dev_do_read() */ 559 smp_mb__after_atomic(); 560 if (test_bit(FR_SENT, &req->flags)) 561 queue_interrupt(req); 562 } 563 564 if (!test_bit(FR_FORCE, &req->flags)) { 565 bool removed; 566 567 /* Only fatal signals may interrupt this */ 568 err = wait_event_killable(req->waitq, 569 test_bit(FR_FINISHED, &req->flags)); 570 if (!err) 571 return; 572 573 if (req->args->abort_on_kill) { 574 fuse_abort_conn(fc); 575 return; 576 } 577 578 if (test_bit(FR_URING, &req->flags)) 579 removed = fuse_uring_remove_pending_req(req); 580 else 581 removed = fuse_remove_pending_req(req, &fiq->lock); 582 if (removed) 583 return; 584 } 585 586 /* 587 * Either request is already in userspace, or it was forced. 588 * Wait it out. 
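 * (A forced request is never torn down on a fatal signal; we keep waiting
 * until the daemon replies or the connection is aborted.)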
589 */ 590 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags)); 591 } 592 593 static void __fuse_request_send(struct fuse_req *req) 594 { 595 struct fuse_iqueue *fiq = &req->fm->fc->iq; 596 597 BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); 598 599 /* acquire extra reference, since request is still needed after 600 fuse_request_end() */ 601 __fuse_get_request(req); 602 fuse_send_one(fiq, req); 603 604 request_wait_answer(req); 605 /* Pairs with smp_wmb() in fuse_request_end() */ 606 smp_rmb(); 607 } 608 609 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) 610 { 611 if (fc->minor < 4 && args->opcode == FUSE_STATFS) 612 args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE; 613 614 if (fc->minor < 9) { 615 switch (args->opcode) { 616 case FUSE_LOOKUP: 617 case FUSE_CREATE: 618 case FUSE_MKNOD: 619 case FUSE_MKDIR: 620 case FUSE_SYMLINK: 621 case FUSE_LINK: 622 args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 623 break; 624 case FUSE_GETATTR: 625 case FUSE_SETATTR: 626 args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 627 break; 628 } 629 } 630 if (fc->minor < 12) { 631 switch (args->opcode) { 632 case FUSE_CREATE: 633 args->in_args[0].size = sizeof(struct fuse_open_in); 634 break; 635 case FUSE_MKNOD: 636 args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; 637 break; 638 } 639 } 640 } 641 642 static void fuse_force_creds(struct fuse_req *req) 643 { 644 struct fuse_conn *fc = req->fm->fc; 645 646 if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) { 647 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); 648 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); 649 } else { 650 req->in.h.uid = FUSE_INVALID_UIDGID; 651 req->in.h.gid = FUSE_INVALID_UIDGID; 652 } 653 654 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); 655 } 656 657 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args) 658 { 659 req->in.h.opcode = args->opcode; 660 req->in.h.nodeid = args->nodeid; 661 req->args = args; 662 if (args->is_ext) 663 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8; 664 if (args->end) 665 __set_bit(FR_ASYNC, &req->flags); 666 } 667 668 ssize_t __fuse_simple_request(struct mnt_idmap *idmap, 669 struct fuse_mount *fm, 670 struct fuse_args *args) 671 { 672 struct fuse_conn *fc = fm->fc; 673 struct fuse_req *req; 674 ssize_t ret; 675 676 if (args->force) { 677 atomic_inc(&fc->num_waiting); 678 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL); 679 680 if (!args->nocreds) 681 fuse_force_creds(req); 682 683 __set_bit(FR_WAITING, &req->flags); 684 if (!args->abort_on_kill) 685 __set_bit(FR_FORCE, &req->flags); 686 } else { 687 WARN_ON(args->nocreds); 688 req = fuse_get_req(idmap, fm, false); 689 if (IS_ERR(req)) 690 return PTR_ERR(req); 691 } 692 693 /* Needs to be done after fuse_get_req() so that fc->minor is valid */ 694 fuse_adjust_compat(fc, args); 695 fuse_args_to_req(req, args); 696 697 if (!args->noreply) 698 __set_bit(FR_ISREPLY, &req->flags); 699 __fuse_request_send(req); 700 ret = req->out.h.error; 701 if (!ret && args->out_argvar) { 702 BUG_ON(args->out_numargs == 0); 703 ret = args->out_args[args->out_numargs - 1].size; 704 } 705 fuse_put_request(req); 706 707 return ret; 708 } 709 710 #ifdef CONFIG_FUSE_IO_URING 711 static bool fuse_request_queue_background_uring(struct fuse_conn *fc, 712 struct fuse_req *req) 713 { 714 struct fuse_iqueue *fiq = &fc->iq; 715 716 req->in.h.len = sizeof(struct fuse_in_header) + 717 fuse_len_args(req->args->in_numargs, 718 (struct fuse_arg *) 
req->args->in_args); 719 fuse_request_assign_unique(fiq, req); 720 721 return fuse_uring_queue_bq_req(req); 722 } 723 #endif 724 725 /* 726 * @return true if queued 727 */ 728 static int fuse_request_queue_background(struct fuse_req *req) 729 { 730 struct fuse_mount *fm = req->fm; 731 struct fuse_conn *fc = fm->fc; 732 bool queued = false; 733 734 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags)); 735 if (!test_bit(FR_WAITING, &req->flags)) { 736 __set_bit(FR_WAITING, &req->flags); 737 atomic_inc(&fc->num_waiting); 738 } 739 __set_bit(FR_ISREPLY, &req->flags); 740 741 #ifdef CONFIG_FUSE_IO_URING 742 if (fuse_uring_ready(fc)) 743 return fuse_request_queue_background_uring(fc, req); 744 #endif 745 746 spin_lock(&fc->bg_lock); 747 if (likely(fc->connected)) { 748 fc->num_background++; 749 if (fc->num_background == fc->max_background) 750 fc->blocked = 1; 751 list_add_tail(&req->list, &fc->bg_queue); 752 flush_bg_queue(fc); 753 queued = true; 754 } 755 spin_unlock(&fc->bg_lock); 756 757 return queued; 758 } 759 760 int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args, 761 gfp_t gfp_flags) 762 { 763 struct fuse_req *req; 764 765 if (args->force) { 766 WARN_ON(!args->nocreds); 767 req = fuse_request_alloc(fm, gfp_flags); 768 if (!req) 769 return -ENOMEM; 770 __set_bit(FR_BACKGROUND, &req->flags); 771 } else { 772 WARN_ON(args->nocreds); 773 req = fuse_get_req(&invalid_mnt_idmap, fm, true); 774 if (IS_ERR(req)) 775 return PTR_ERR(req); 776 } 777 778 fuse_args_to_req(req, args); 779 780 if (!fuse_request_queue_background(req)) { 781 fuse_put_request(req); 782 return -ENOTCONN; 783 } 784 785 return 0; 786 } 787 EXPORT_SYMBOL_GPL(fuse_simple_background); 788 789 static int fuse_simple_notify_reply(struct fuse_mount *fm, 790 struct fuse_args *args, u64 unique) 791 { 792 struct fuse_req *req; 793 struct fuse_iqueue *fiq = &fm->fc->iq; 794 795 req = fuse_get_req(&invalid_mnt_idmap, fm, false); 796 if (IS_ERR(req)) 797 return PTR_ERR(req); 798 799 __clear_bit(FR_ISREPLY, &req->flags); 800 req->in.h.unique = unique; 801 802 fuse_args_to_req(req, args); 803 804 fuse_send_one(fiq, req); 805 806 return 0; 807 } 808 809 /* 810 * Lock the request. Up to the next unlock_request() there mustn't be 811 * anything that could cause a page-fault. If the request was already 812 * aborted bail out. 813 */ 814 static int lock_request(struct fuse_req *req) 815 { 816 int err = 0; 817 if (req) { 818 spin_lock(&req->waitq.lock); 819 if (test_bit(FR_ABORTED, &req->flags)) 820 err = -ENOENT; 821 else 822 set_bit(FR_LOCKED, &req->flags); 823 spin_unlock(&req->waitq.lock); 824 } 825 return err; 826 } 827 828 /* 829 * Unlock request. If it was aborted while locked, caller is responsible 830 * for unlocking and ending the request. 
831 */ 832 static int unlock_request(struct fuse_req *req) 833 { 834 int err = 0; 835 if (req) { 836 spin_lock(&req->waitq.lock); 837 if (test_bit(FR_ABORTED, &req->flags)) 838 err = -ENOENT; 839 else 840 clear_bit(FR_LOCKED, &req->flags); 841 spin_unlock(&req->waitq.lock); 842 } 843 return err; 844 } 845 846 void fuse_copy_init(struct fuse_copy_state *cs, bool write, 847 struct iov_iter *iter) 848 { 849 memset(cs, 0, sizeof(*cs)); 850 cs->write = write; 851 cs->iter = iter; 852 } 853 854 /* Unmap and put previous page of userspace buffer */ 855 void fuse_copy_finish(struct fuse_copy_state *cs) 856 { 857 if (cs->currbuf) { 858 struct pipe_buffer *buf = cs->currbuf; 859 860 if (cs->write) 861 buf->len = PAGE_SIZE - cs->len; 862 cs->currbuf = NULL; 863 } else if (cs->pg) { 864 if (cs->write) { 865 flush_dcache_page(cs->pg); 866 set_page_dirty_lock(cs->pg); 867 } 868 put_page(cs->pg); 869 } 870 cs->pg = NULL; 871 } 872 873 /* 874 * Get another pagefull of userspace buffer, and map it to kernel 875 * address space, and lock request 876 */ 877 static int fuse_copy_fill(struct fuse_copy_state *cs) 878 { 879 struct page *page; 880 int err; 881 882 err = unlock_request(cs->req); 883 if (err) 884 return err; 885 886 fuse_copy_finish(cs); 887 if (cs->pipebufs) { 888 struct pipe_buffer *buf = cs->pipebufs; 889 890 if (!cs->write) { 891 err = pipe_buf_confirm(cs->pipe, buf); 892 if (err) 893 return err; 894 895 BUG_ON(!cs->nr_segs); 896 cs->currbuf = buf; 897 cs->pg = buf->page; 898 cs->offset = buf->offset; 899 cs->len = buf->len; 900 cs->pipebufs++; 901 cs->nr_segs--; 902 } else { 903 if (cs->nr_segs >= cs->pipe->max_usage) 904 return -EIO; 905 906 page = alloc_page(GFP_HIGHUSER); 907 if (!page) 908 return -ENOMEM; 909 910 buf->page = page; 911 buf->offset = 0; 912 buf->len = 0; 913 914 cs->currbuf = buf; 915 cs->pg = page; 916 cs->offset = 0; 917 cs->len = PAGE_SIZE; 918 cs->pipebufs++; 919 cs->nr_segs++; 920 } 921 } else { 922 size_t off; 923 err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off); 924 if (err < 0) 925 return err; 926 BUG_ON(!err); 927 cs->len = err; 928 cs->offset = off; 929 cs->pg = page; 930 } 931 932 return lock_request(cs->req); 933 } 934 935 /* Do as much copy to/from userspace buffer as we can */ 936 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) 937 { 938 unsigned ncpy = min(*size, cs->len); 939 if (val) { 940 void *pgaddr = kmap_local_page(cs->pg); 941 void *buf = pgaddr + cs->offset; 942 943 if (cs->write) 944 memcpy(buf, *val, ncpy); 945 else 946 memcpy(*val, buf, ncpy); 947 948 kunmap_local(pgaddr); 949 *val += ncpy; 950 } 951 *size -= ncpy; 952 cs->len -= ncpy; 953 cs->offset += ncpy; 954 if (cs->is_uring) 955 cs->ring.copied_sz += ncpy; 956 957 return ncpy; 958 } 959 960 static int fuse_check_folio(struct folio *folio) 961 { 962 if (folio_mapped(folio) || 963 folio->mapping != NULL || 964 (folio->flags.f & PAGE_FLAGS_CHECK_AT_PREP & 965 ~(1 << PG_locked | 966 1 << PG_referenced | 967 1 << PG_lru | 968 1 << PG_active | 969 1 << PG_workingset | 970 1 << PG_reclaim | 971 1 << PG_waiters | 972 LRU_GEN_MASK | LRU_REFS_MASK))) { 973 dump_page(&folio->page, "fuse: trying to steal weird page"); 974 return 1; 975 } 976 return 0; 977 } 978 979 /* 980 * Attempt to steal a page from the splice() pipe and move it into the 981 * pagecache. If successful, the pointer in @pagep will be updated. The 982 * folio that was originally in @pagep will lose a reference and the new 983 * folio returned in @pagep will carry a reference. 
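 * Returns 0 on success, a negative errno on failure, or 1 if the folio could
 * not be stolen and the caller should fall back to copying.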
984 */ 985 static int fuse_try_move_folio(struct fuse_copy_state *cs, struct folio **foliop) 986 { 987 int err; 988 struct folio *oldfolio = *foliop; 989 struct folio *newfolio; 990 struct pipe_buffer *buf = cs->pipebufs; 991 992 folio_get(oldfolio); 993 err = unlock_request(cs->req); 994 if (err) 995 goto out_put_old; 996 997 fuse_copy_finish(cs); 998 999 err = pipe_buf_confirm(cs->pipe, buf); 1000 if (err) 1001 goto out_put_old; 1002 1003 BUG_ON(!cs->nr_segs); 1004 cs->currbuf = buf; 1005 cs->len = buf->len; 1006 cs->pipebufs++; 1007 cs->nr_segs--; 1008 1009 if (cs->len != folio_size(oldfolio)) 1010 goto out_fallback; 1011 1012 if (!pipe_buf_try_steal(cs->pipe, buf)) 1013 goto out_fallback; 1014 1015 newfolio = page_folio(buf->page); 1016 1017 folio_clear_uptodate(newfolio); 1018 folio_clear_mappedtodisk(newfolio); 1019 1020 if (folio_test_large(newfolio)) 1021 goto out_fallback_unlock; 1022 1023 if (fuse_check_folio(newfolio) != 0) 1024 goto out_fallback_unlock; 1025 1026 /* 1027 * This is a new and locked page, it shouldn't be mapped or 1028 * have any special flags on it 1029 */ 1030 if (WARN_ON(folio_mapped(oldfolio))) 1031 goto out_fallback_unlock; 1032 if (WARN_ON(folio_has_private(oldfolio))) 1033 goto out_fallback_unlock; 1034 if (WARN_ON(folio_test_dirty(oldfolio) || 1035 folio_test_writeback(oldfolio))) 1036 goto out_fallback_unlock; 1037 if (WARN_ON(folio_test_mlocked(oldfolio))) 1038 goto out_fallback_unlock; 1039 1040 replace_page_cache_folio(oldfolio, newfolio); 1041 1042 folio_get(newfolio); 1043 1044 if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 1045 folio_add_lru(newfolio); 1046 1047 /* 1048 * Release while we have extra ref on stolen page. Otherwise 1049 * anon_pipe_buf_release() might think the page can be reused. 1050 */ 1051 pipe_buf_release(cs->pipe, buf); 1052 1053 err = 0; 1054 spin_lock(&cs->req->waitq.lock); 1055 if (test_bit(FR_ABORTED, &cs->req->flags)) 1056 err = -ENOENT; 1057 else 1058 *foliop = newfolio; 1059 spin_unlock(&cs->req->waitq.lock); 1060 1061 if (err) { 1062 folio_unlock(newfolio); 1063 folio_put(newfolio); 1064 goto out_put_old; 1065 } 1066 1067 folio_unlock(oldfolio); 1068 /* Drop ref for ap->pages[] array */ 1069 folio_put(oldfolio); 1070 cs->len = 0; 1071 1072 err = 0; 1073 out_put_old: 1074 /* Drop ref obtained in this function */ 1075 folio_put(oldfolio); 1076 return err; 1077 1078 out_fallback_unlock: 1079 folio_unlock(newfolio); 1080 out_fallback: 1081 cs->pg = buf->page; 1082 cs->offset = buf->offset; 1083 1084 err = lock_request(cs->req); 1085 if (!err) 1086 err = 1; 1087 1088 goto out_put_old; 1089 } 1090 1091 static int fuse_ref_folio(struct fuse_copy_state *cs, struct folio *folio, 1092 unsigned offset, unsigned count) 1093 { 1094 struct pipe_buffer *buf; 1095 int err; 1096 1097 if (cs->nr_segs >= cs->pipe->max_usage) 1098 return -EIO; 1099 1100 folio_get(folio); 1101 err = unlock_request(cs->req); 1102 if (err) { 1103 folio_put(folio); 1104 return err; 1105 } 1106 1107 fuse_copy_finish(cs); 1108 1109 buf = cs->pipebufs; 1110 buf->page = &folio->page; 1111 buf->offset = offset; 1112 buf->len = count; 1113 1114 cs->pipebufs++; 1115 cs->nr_segs++; 1116 cs->len = 0; 1117 1118 return 0; 1119 } 1120 1121 /* 1122 * Copy a folio in the request to/from the userspace buffer. 
Must be 1123 * done atomically 1124 */ 1125 static int fuse_copy_folio(struct fuse_copy_state *cs, struct folio **foliop, 1126 unsigned offset, unsigned count, int zeroing) 1127 { 1128 int err; 1129 struct folio *folio = *foliop; 1130 size_t size; 1131 1132 if (folio) { 1133 size = folio_size(folio); 1134 if (zeroing && count < size) 1135 folio_zero_range(folio, 0, size); 1136 } 1137 1138 while (count) { 1139 if (cs->write && cs->pipebufs && folio) { 1140 /* 1141 * Can't control lifetime of pipe buffers, so always 1142 * copy user pages. 1143 */ 1144 if (cs->req->args->user_pages) { 1145 err = fuse_copy_fill(cs); 1146 if (err) 1147 return err; 1148 } else { 1149 return fuse_ref_folio(cs, folio, offset, count); 1150 } 1151 } else if (!cs->len) { 1152 if (cs->move_folios && folio && 1153 offset == 0 && count == size) { 1154 err = fuse_try_move_folio(cs, foliop); 1155 if (err <= 0) 1156 return err; 1157 } else { 1158 err = fuse_copy_fill(cs); 1159 if (err) 1160 return err; 1161 } 1162 } 1163 if (folio) { 1164 void *mapaddr = kmap_local_folio(folio, offset); 1165 void *buf = mapaddr; 1166 unsigned int copy = count; 1167 unsigned int bytes_copied; 1168 1169 if (folio_test_highmem(folio) && count > PAGE_SIZE - offset_in_page(offset)) 1170 copy = PAGE_SIZE - offset_in_page(offset); 1171 1172 bytes_copied = fuse_copy_do(cs, &buf, ©); 1173 kunmap_local(mapaddr); 1174 offset += bytes_copied; 1175 count -= bytes_copied; 1176 } else 1177 offset += fuse_copy_do(cs, NULL, &count); 1178 } 1179 if (folio && !cs->write) 1180 flush_dcache_folio(folio); 1181 return 0; 1182 } 1183 1184 /* Copy folios in the request to/from userspace buffer */ 1185 static int fuse_copy_folios(struct fuse_copy_state *cs, unsigned nbytes, 1186 int zeroing) 1187 { 1188 unsigned i; 1189 struct fuse_req *req = cs->req; 1190 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); 1191 1192 for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) { 1193 int err; 1194 unsigned int offset = ap->descs[i].offset; 1195 unsigned int count = min(nbytes, ap->descs[i].length); 1196 1197 err = fuse_copy_folio(cs, &ap->folios[i], offset, count, zeroing); 1198 if (err) 1199 return err; 1200 1201 nbytes -= count; 1202 } 1203 return 0; 1204 } 1205 1206 /* Copy a single argument in the request to/from userspace buffer */ 1207 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) 1208 { 1209 while (size) { 1210 if (!cs->len) { 1211 int err = fuse_copy_fill(cs); 1212 if (err) 1213 return err; 1214 } 1215 fuse_copy_do(cs, &val, &size); 1216 } 1217 return 0; 1218 } 1219 1220 /* Copy request arguments to/from userspace buffer */ 1221 int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, 1222 unsigned argpages, struct fuse_arg *args, 1223 int zeroing) 1224 { 1225 int err = 0; 1226 unsigned i; 1227 1228 for (i = 0; !err && i < numargs; i++) { 1229 struct fuse_arg *arg = &args[i]; 1230 if (i == numargs - 1 && argpages) 1231 err = fuse_copy_folios(cs, arg->size, zeroing); 1232 else 1233 err = fuse_copy_one(cs, arg->value, arg->size); 1234 } 1235 return err; 1236 } 1237 1238 static int forget_pending(struct fuse_iqueue *fiq) 1239 { 1240 return fiq->forget_list_head.next != NULL; 1241 } 1242 1243 static int request_pending(struct fuse_iqueue *fiq) 1244 { 1245 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || 1246 forget_pending(fiq); 1247 } 1248 1249 /* 1250 * Transfer an interrupt request to userspace 1251 * 1252 * Unlike other requests this is assembled on demand, without a need 
1253 * to allocate a separate fuse_req structure. 1254 * 1255 * Called with fiq->lock held, releases it 1256 */ 1257 static int fuse_read_interrupt(struct fuse_iqueue *fiq, 1258 struct fuse_copy_state *cs, 1259 size_t nbytes, struct fuse_req *req) 1260 __releases(fiq->lock) 1261 { 1262 struct fuse_in_header ih; 1263 struct fuse_interrupt_in arg; 1264 unsigned reqsize = sizeof(ih) + sizeof(arg); 1265 int err; 1266 1267 list_del_init(&req->intr_entry); 1268 memset(&ih, 0, sizeof(ih)); 1269 memset(&arg, 0, sizeof(arg)); 1270 ih.len = reqsize; 1271 ih.opcode = FUSE_INTERRUPT; 1272 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); 1273 arg.unique = req->in.h.unique; 1274 1275 spin_unlock(&fiq->lock); 1276 if (nbytes < reqsize) 1277 return -EINVAL; 1278 1279 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1280 if (!err) 1281 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1282 fuse_copy_finish(cs); 1283 1284 return err ? err : reqsize; 1285 } 1286 1287 static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, 1288 unsigned int max, 1289 unsigned int *countp) 1290 { 1291 struct fuse_forget_link *head = fiq->forget_list_head.next; 1292 struct fuse_forget_link **newhead = &head; 1293 unsigned count; 1294 1295 for (count = 0; *newhead != NULL && count < max; count++) 1296 newhead = &(*newhead)->next; 1297 1298 fiq->forget_list_head.next = *newhead; 1299 *newhead = NULL; 1300 if (fiq->forget_list_head.next == NULL) 1301 fiq->forget_list_tail = &fiq->forget_list_head; 1302 1303 if (countp != NULL) 1304 *countp = count; 1305 1306 return head; 1307 } 1308 1309 static int fuse_read_single_forget(struct fuse_iqueue *fiq, 1310 struct fuse_copy_state *cs, 1311 size_t nbytes) 1312 __releases(fiq->lock) 1313 { 1314 int err; 1315 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL); 1316 struct fuse_forget_in arg = { 1317 .nlookup = forget->forget_one.nlookup, 1318 }; 1319 struct fuse_in_header ih = { 1320 .opcode = FUSE_FORGET, 1321 .nodeid = forget->forget_one.nodeid, 1322 .unique = fuse_get_unique_locked(fiq), 1323 .len = sizeof(ih) + sizeof(arg), 1324 }; 1325 1326 spin_unlock(&fiq->lock); 1327 kfree(forget); 1328 if (nbytes < ih.len) 1329 return -EINVAL; 1330 1331 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1332 if (!err) 1333 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1334 fuse_copy_finish(cs); 1335 1336 if (err) 1337 return err; 1338 1339 return ih.len; 1340 } 1341 1342 static int fuse_read_batch_forget(struct fuse_iqueue *fiq, 1343 struct fuse_copy_state *cs, size_t nbytes) 1344 __releases(fiq->lock) 1345 { 1346 int err; 1347 unsigned max_forgets; 1348 unsigned count; 1349 struct fuse_forget_link *head; 1350 struct fuse_batch_forget_in arg = { .count = 0 }; 1351 struct fuse_in_header ih = { 1352 .opcode = FUSE_BATCH_FORGET, 1353 .unique = fuse_get_unique_locked(fiq), 1354 .len = sizeof(ih) + sizeof(arg), 1355 }; 1356 1357 if (nbytes < ih.len) { 1358 spin_unlock(&fiq->lock); 1359 return -EINVAL; 1360 } 1361 1362 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); 1363 head = fuse_dequeue_forget(fiq, max_forgets, &count); 1364 spin_unlock(&fiq->lock); 1365 1366 arg.count = count; 1367 ih.len += count * sizeof(struct fuse_forget_one); 1368 err = fuse_copy_one(cs, &ih, sizeof(ih)); 1369 if (!err) 1370 err = fuse_copy_one(cs, &arg, sizeof(arg)); 1371 1372 while (head) { 1373 struct fuse_forget_link *forget = head; 1374 1375 if (!err) { 1376 err = fuse_copy_one(cs, &forget->forget_one, 1377 sizeof(forget->forget_one)); 1378 } 1379 head = forget->next; 1380 kfree(forget); 
1381 } 1382 1383 fuse_copy_finish(cs); 1384 1385 if (err) 1386 return err; 1387 1388 return ih.len; 1389 } 1390 1391 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, 1392 struct fuse_copy_state *cs, 1393 size_t nbytes) 1394 __releases(fiq->lock) 1395 { 1396 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) 1397 return fuse_read_single_forget(fiq, cs, nbytes); 1398 else 1399 return fuse_read_batch_forget(fiq, cs, nbytes); 1400 } 1401 1402 /* 1403 * Read a single request into the userspace filesystem's buffer. This 1404 * function waits until a request is available, then removes it from 1405 * the pending list and copies request data to userspace buffer. If 1406 * no reply is needed (FORGET) or request has been aborted or there 1407 * was an error during the copying then it's finished by calling 1408 * fuse_request_end(). Otherwise add it to the processing list, and set 1409 * the 'sent' flag. 1410 */ 1411 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, 1412 struct fuse_copy_state *cs, size_t nbytes) 1413 { 1414 ssize_t err; 1415 struct fuse_conn *fc = fud->fc; 1416 struct fuse_iqueue *fiq = &fc->iq; 1417 struct fuse_pqueue *fpq = &fud->pq; 1418 struct fuse_req *req; 1419 struct fuse_args *args; 1420 unsigned reqsize; 1421 unsigned int hash; 1422 1423 /* 1424 * Require sane minimum read buffer - that has capacity for fixed part 1425 * of any request header + negotiated max_write room for data. 1426 * 1427 * Historically libfuse reserves 4K for fixed header room, but e.g. 1428 * GlusterFS reserves only 80 bytes 1429 * 1430 * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)` 1431 * 1432 * which is the absolute minimum any sane filesystem should be using 1433 * for header room. 1434 */ 1435 if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER, 1436 sizeof(struct fuse_in_header) + 1437 sizeof(struct fuse_write_in) + 1438 fc->max_write)) 1439 return -EINVAL; 1440 1441 restart: 1442 for (;;) { 1443 spin_lock(&fiq->lock); 1444 if (!fiq->connected || request_pending(fiq)) 1445 break; 1446 spin_unlock(&fiq->lock); 1447 1448 if (file->f_flags & O_NONBLOCK) 1449 return -EAGAIN; 1450 err = wait_event_interruptible_exclusive(fiq->waitq, 1451 !fiq->connected || request_pending(fiq)); 1452 if (err) 1453 return err; 1454 } 1455 1456 if (!fiq->connected) { 1457 err = fc->aborted ? 
-ECONNABORTED : -ENODEV; 1458 goto err_unlock; 1459 } 1460 1461 if (!list_empty(&fiq->interrupts)) { 1462 req = list_entry(fiq->interrupts.next, struct fuse_req, 1463 intr_entry); 1464 return fuse_read_interrupt(fiq, cs, nbytes, req); 1465 } 1466 1467 if (forget_pending(fiq)) { 1468 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) 1469 return fuse_read_forget(fc, fiq, cs, nbytes); 1470 1471 if (fiq->forget_batch <= -8) 1472 fiq->forget_batch = 16; 1473 } 1474 1475 req = list_entry(fiq->pending.next, struct fuse_req, list); 1476 clear_bit(FR_PENDING, &req->flags); 1477 list_del_init(&req->list); 1478 spin_unlock(&fiq->lock); 1479 1480 args = req->args; 1481 reqsize = req->in.h.len; 1482 1483 /* If request is too large, reply with an error and restart the read */ 1484 if (nbytes < reqsize) { 1485 req->out.h.error = -EIO; 1486 /* SETXATTR is special, since it may contain too large data */ 1487 if (args->opcode == FUSE_SETXATTR) 1488 req->out.h.error = -E2BIG; 1489 fuse_request_end(req); 1490 goto restart; 1491 } 1492 spin_lock(&fpq->lock); 1493 /* 1494 * Must not put request on fpq->io queue after having been shut down by 1495 * fuse_abort_conn() 1496 */ 1497 if (!fpq->connected) { 1498 req->out.h.error = err = -ECONNABORTED; 1499 goto out_end; 1500 1501 } 1502 list_add(&req->list, &fpq->io); 1503 spin_unlock(&fpq->lock); 1504 cs->req = req; 1505 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h)); 1506 if (!err) 1507 err = fuse_copy_args(cs, args->in_numargs, args->in_pages, 1508 (struct fuse_arg *) args->in_args, 0); 1509 fuse_copy_finish(cs); 1510 spin_lock(&fpq->lock); 1511 clear_bit(FR_LOCKED, &req->flags); 1512 if (!fpq->connected) { 1513 err = fc->aborted ? -ECONNABORTED : -ENODEV; 1514 goto out_end; 1515 } 1516 if (err) { 1517 req->out.h.error = -EIO; 1518 goto out_end; 1519 } 1520 if (!test_bit(FR_ISREPLY, &req->flags)) { 1521 err = reqsize; 1522 goto out_end; 1523 } 1524 hash = fuse_req_hash(req->in.h.unique); 1525 list_move_tail(&req->list, &fpq->processing[hash]); 1526 __fuse_get_request(req); 1527 set_bit(FR_SENT, &req->flags); 1528 spin_unlock(&fpq->lock); 1529 /* matches barrier in request_wait_answer() */ 1530 smp_mb__after_atomic(); 1531 if (test_bit(FR_INTERRUPTED, &req->flags)) 1532 queue_interrupt(req); 1533 fuse_put_request(req); 1534 1535 return reqsize; 1536 1537 out_end: 1538 if (!test_bit(FR_PRIVATE, &req->flags)) 1539 list_del_init(&req->list); 1540 spin_unlock(&fpq->lock); 1541 fuse_request_end(req); 1542 return err; 1543 1544 err_unlock: 1545 spin_unlock(&fiq->lock); 1546 return err; 1547 } 1548 1549 static int fuse_dev_open(struct inode *inode, struct file *file) 1550 { 1551 struct fuse_dev *fud = fuse_dev_alloc(); 1552 1553 if (!fud) 1554 return -ENOMEM; 1555 1556 file->private_data = fud; 1557 return 0; 1558 } 1559 1560 struct fuse_dev *fuse_get_dev(struct file *file) 1561 { 1562 struct fuse_dev *fud = fuse_file_to_fud(file); 1563 int err; 1564 1565 err = wait_event_interruptible(fuse_dev_waitq, fuse_dev_fc_get(fud) != NULL); 1566 if (err) 1567 return ERR_PTR(err); 1568 1569 return fud; 1570 } 1571 1572 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to) 1573 { 1574 struct fuse_copy_state cs; 1575 struct file *file = iocb->ki_filp; 1576 struct fuse_dev *fud = fuse_get_dev(file); 1577 1578 if (IS_ERR(fud)) 1579 return PTR_ERR(fud); 1580 1581 if (!user_backed_iter(to)) 1582 return -EINVAL; 1583 1584 fuse_copy_init(&cs, true, to); 1585 1586 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to)); 1587 } 1588 1589 static ssize_t 
fuse_dev_splice_read(struct file *in, loff_t *ppos, 1590 struct pipe_inode_info *pipe, 1591 size_t len, unsigned int flags) 1592 { 1593 int total, ret; 1594 int page_nr = 0; 1595 struct pipe_buffer *bufs; 1596 struct fuse_copy_state cs; 1597 struct fuse_dev *fud = fuse_get_dev(in); 1598 1599 if (IS_ERR(fud)) 1600 return PTR_ERR(fud); 1601 1602 bufs = kvmalloc_objs(struct pipe_buffer, pipe->max_usage); 1603 if (!bufs) 1604 return -ENOMEM; 1605 1606 fuse_copy_init(&cs, true, NULL); 1607 cs.pipebufs = bufs; 1608 cs.pipe = pipe; 1609 ret = fuse_dev_do_read(fud, in, &cs, len); 1610 if (ret < 0) 1611 goto out; 1612 1613 if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) { 1614 ret = -EIO; 1615 goto out; 1616 } 1617 1618 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) { 1619 /* 1620 * Need to be careful about this. Having buf->ops in module 1621 * code can Oops if the buffer persists after module unload. 1622 */ 1623 bufs[page_nr].ops = &nosteal_pipe_buf_ops; 1624 bufs[page_nr].flags = 0; 1625 ret = add_to_pipe(pipe, &bufs[page_nr++]); 1626 if (unlikely(ret < 0)) 1627 break; 1628 } 1629 if (total) 1630 ret = total; 1631 out: 1632 for (; page_nr < cs.nr_segs; page_nr++) 1633 put_page(bufs[page_nr].page); 1634 1635 kvfree(bufs); 1636 return ret; 1637 } 1638 1639 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, 1640 struct fuse_copy_state *cs) 1641 { 1642 struct fuse_notify_poll_wakeup_out outarg; 1643 int err; 1644 1645 if (size != sizeof(outarg)) 1646 return -EINVAL; 1647 1648 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1649 if (err) 1650 return err; 1651 1652 fuse_copy_finish(cs); 1653 return fuse_notify_poll_wakeup(fc, &outarg); 1654 } 1655 1656 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, 1657 struct fuse_copy_state *cs) 1658 { 1659 struct fuse_notify_inval_inode_out outarg; 1660 int err; 1661 1662 if (size != sizeof(outarg)) 1663 return -EINVAL; 1664 1665 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1666 if (err) 1667 return err; 1668 fuse_copy_finish(cs); 1669 1670 down_read(&fc->killsb); 1671 err = fuse_reverse_inval_inode(fc, outarg.ino, 1672 outarg.off, outarg.len); 1673 up_read(&fc->killsb); 1674 return err; 1675 } 1676 1677 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, 1678 struct fuse_copy_state *cs) 1679 { 1680 struct fuse_notify_inval_entry_out outarg; 1681 int err; 1682 char *buf; 1683 struct qstr name; 1684 1685 if (size < sizeof(outarg)) 1686 return -EINVAL; 1687 1688 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1689 if (err) 1690 return err; 1691 1692 if (outarg.namelen > fc->name_max) 1693 return -ENAMETOOLONG; 1694 1695 err = -EINVAL; 1696 if (size != sizeof(outarg) + outarg.namelen + 1) 1697 return -EINVAL; 1698 1699 buf = kzalloc(outarg.namelen + 1, GFP_KERNEL); 1700 if (!buf) 1701 return -ENOMEM; 1702 1703 name.name = buf; 1704 name.len = outarg.namelen; 1705 err = fuse_copy_one(cs, buf, outarg.namelen + 1); 1706 if (err) 1707 goto err; 1708 fuse_copy_finish(cs); 1709 buf[outarg.namelen] = 0; 1710 1711 down_read(&fc->killsb); 1712 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags); 1713 up_read(&fc->killsb); 1714 err: 1715 kfree(buf); 1716 return err; 1717 } 1718 1719 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, 1720 struct fuse_copy_state *cs) 1721 { 1722 struct fuse_notify_delete_out outarg; 1723 int err; 1724 char *buf; 1725 struct qstr name; 1726 1727 if (size < sizeof(outarg)) 1728 return -EINVAL; 1729 1730 err 
= fuse_copy_one(cs, &outarg, sizeof(outarg)); 1731 if (err) 1732 return err; 1733 1734 if (outarg.namelen > fc->name_max) 1735 return -ENAMETOOLONG; 1736 1737 if (size != sizeof(outarg) + outarg.namelen + 1) 1738 return -EINVAL; 1739 1740 buf = kzalloc(outarg.namelen + 1, GFP_KERNEL); 1741 if (!buf) 1742 return -ENOMEM; 1743 1744 name.name = buf; 1745 name.len = outarg.namelen; 1746 err = fuse_copy_one(cs, buf, outarg.namelen + 1); 1747 if (err) 1748 goto err; 1749 fuse_copy_finish(cs); 1750 buf[outarg.namelen] = 0; 1751 1752 down_read(&fc->killsb); 1753 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0); 1754 up_read(&fc->killsb); 1755 err: 1756 kfree(buf); 1757 return err; 1758 } 1759 1760 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, 1761 struct fuse_copy_state *cs) 1762 { 1763 struct fuse_notify_store_out outarg; 1764 struct inode *inode; 1765 struct address_space *mapping; 1766 u64 nodeid; 1767 int err; 1768 unsigned int num; 1769 loff_t file_size; 1770 loff_t pos; 1771 loff_t end; 1772 1773 if (size < sizeof(outarg)) 1774 return -EINVAL; 1775 1776 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1777 if (err) 1778 return err; 1779 1780 if (size - sizeof(outarg) != outarg.size) 1781 return -EINVAL; 1782 1783 if (outarg.offset >= MAX_LFS_FILESIZE) 1784 return -EINVAL; 1785 1786 nodeid = outarg.nodeid; 1787 pos = outarg.offset; 1788 num = min(outarg.size, MAX_LFS_FILESIZE - pos); 1789 1790 down_read(&fc->killsb); 1791 1792 err = -ENOENT; 1793 inode = fuse_ilookup(fc, nodeid, NULL); 1794 if (!inode) 1795 goto out_up_killsb; 1796 1797 mapping = inode->i_mapping; 1798 file_size = i_size_read(inode); 1799 end = pos + num; 1800 if (end > file_size) { 1801 file_size = end; 1802 fuse_write_update_attr(inode, file_size, num); 1803 } 1804 1805 while (num) { 1806 struct folio *folio; 1807 unsigned int folio_offset; 1808 unsigned int nr_bytes; 1809 pgoff_t index = pos >> PAGE_SHIFT; 1810 1811 folio = filemap_grab_folio(mapping, index); 1812 err = PTR_ERR(folio); 1813 if (IS_ERR(folio)) 1814 goto out_iput; 1815 1816 folio_offset = offset_in_folio(folio, pos); 1817 nr_bytes = min(num, folio_size(folio) - folio_offset); 1818 1819 err = fuse_copy_folio(cs, &folio, folio_offset, nr_bytes, 0); 1820 if (!folio_test_uptodate(folio) && !err && folio_offset == 0 && 1821 (nr_bytes == folio_size(folio) || file_size == end)) { 1822 folio_zero_segment(folio, nr_bytes, folio_size(folio)); 1823 folio_mark_uptodate(folio); 1824 } 1825 folio_unlock(folio); 1826 folio_put(folio); 1827 1828 if (err) 1829 goto out_iput; 1830 1831 pos += nr_bytes; 1832 num -= nr_bytes; 1833 } 1834 1835 err = 0; 1836 1837 out_iput: 1838 iput(inode); 1839 out_up_killsb: 1840 up_read(&fc->killsb); 1841 return err; 1842 } 1843 1844 struct fuse_retrieve_args { 1845 struct fuse_args_pages ap; 1846 struct fuse_notify_retrieve_in inarg; 1847 }; 1848 1849 static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args, 1850 int error) 1851 { 1852 struct fuse_retrieve_args *ra = 1853 container_of(args, typeof(*ra), ap.args); 1854 1855 release_pages(ra->ap.folios, ra->ap.num_folios); 1856 kfree(ra); 1857 } 1858 1859 static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, 1860 struct fuse_notify_retrieve_out *outarg) 1861 { 1862 int err; 1863 struct address_space *mapping = inode->i_mapping; 1864 loff_t file_size; 1865 unsigned int num; 1866 unsigned int offset; 1867 size_t total_len = 0; 1868 unsigned int num_pages; 1869 struct fuse_conn *fc = fm->fc; 1870 struct 
fuse_retrieve_args *ra; 1871 size_t args_size = sizeof(*ra); 1872 struct fuse_args_pages *ap; 1873 struct fuse_args *args; 1874 loff_t pos = outarg->offset; 1875 1876 offset = offset_in_page(pos); 1877 file_size = i_size_read(inode); 1878 1879 num = min(outarg->size, fc->max_write); 1880 if (pos > file_size) 1881 num = 0; 1882 else if (num > file_size - pos) 1883 num = file_size - pos; 1884 1885 num_pages = DIV_ROUND_UP(num + offset, PAGE_SIZE); 1886 num_pages = min(num_pages, fc->max_pages); 1887 num = min(num, num_pages << PAGE_SHIFT); 1888 1889 args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0])); 1890 1891 ra = kzalloc(args_size, GFP_KERNEL); 1892 if (!ra) 1893 return -ENOMEM; 1894 1895 ap = &ra->ap; 1896 ap->folios = (void *) (ra + 1); 1897 ap->descs = (void *) (ap->folios + num_pages); 1898 1899 args = &ap->args; 1900 args->nodeid = outarg->nodeid; 1901 args->opcode = FUSE_NOTIFY_REPLY; 1902 args->in_numargs = 3; 1903 args->in_pages = true; 1904 args->end = fuse_retrieve_end; 1905 1906 while (num && ap->num_folios < num_pages) { 1907 struct folio *folio; 1908 unsigned int folio_offset; 1909 unsigned int nr_bytes; 1910 pgoff_t index = pos >> PAGE_SHIFT; 1911 1912 folio = filemap_get_folio(mapping, index); 1913 if (IS_ERR(folio)) 1914 break; 1915 1916 folio_offset = offset_in_folio(folio, pos); 1917 nr_bytes = min(folio_size(folio) - folio_offset, num); 1918 1919 ap->folios[ap->num_folios] = folio; 1920 ap->descs[ap->num_folios].offset = folio_offset; 1921 ap->descs[ap->num_folios].length = nr_bytes; 1922 ap->num_folios++; 1923 1924 pos += nr_bytes; 1925 num -= nr_bytes; 1926 total_len += nr_bytes; 1927 } 1928 ra->inarg.offset = outarg->offset; 1929 ra->inarg.size = total_len; 1930 fuse_set_zero_arg0(args); 1931 args->in_args[1].size = sizeof(ra->inarg); 1932 args->in_args[1].value = &ra->inarg; 1933 args->in_args[2].size = total_len; 1934 1935 err = fuse_simple_notify_reply(fm, args, outarg->notify_unique); 1936 if (err) 1937 fuse_retrieve_end(fm, args, err); 1938 1939 return err; 1940 } 1941 1942 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, 1943 struct fuse_copy_state *cs) 1944 { 1945 struct fuse_notify_retrieve_out outarg; 1946 struct fuse_mount *fm; 1947 struct inode *inode; 1948 u64 nodeid; 1949 int err; 1950 1951 if (size != sizeof(outarg)) 1952 return -EINVAL; 1953 1954 err = fuse_copy_one(cs, &outarg, sizeof(outarg)); 1955 if (err) 1956 return err; 1957 1958 fuse_copy_finish(cs); 1959 1960 if (outarg.offset >= MAX_LFS_FILESIZE) 1961 return -EINVAL; 1962 1963 down_read(&fc->killsb); 1964 err = -ENOENT; 1965 nodeid = outarg.nodeid; 1966 1967 inode = fuse_ilookup(fc, nodeid, &fm); 1968 if (inode) { 1969 err = fuse_retrieve(fm, inode, &outarg); 1970 iput(inode); 1971 } 1972 up_read(&fc->killsb); 1973 1974 return err; 1975 } 1976 1977 /* 1978 * Resending all processing queue requests. 1979 * 1980 * During a FUSE daemon panics and failover, it is possible for some inflight 1981 * requests to be lost and never returned. As a result, applications awaiting 1982 * replies would become stuck forever. To address this, we can use notification 1983 * to trigger resending of these pending requests to the FUSE daemon, ensuring 1984 * they are properly processed again. 1985 * 1986 * Please note that this strategy is applicable only to idempotent requests or 1987 * if the FUSE daemon takes careful measures to avoid processing duplicated 1988 * non-idempotent requests. 
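 * To help with that, resent requests are tagged with FUSE_UNIQUE_RESEND in
 * their unique ID (see fuse_resend() below), so the daemon can recognize
 * them as potential duplicates.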
 */
static void fuse_resend(struct fuse_conn *fc)
{
	struct fuse_dev *fud;
	struct fuse_req *req, *next;
	struct fuse_iqueue *fiq = &fc->iq;
	LIST_HEAD(to_queue);
	unsigned int i;

	spin_lock(&fc->lock);
	if (!fc->connected) {
		spin_unlock(&fc->lock);
		return;
	}

	list_for_each_entry(fud, &fc->devices, entry) {
		struct fuse_pqueue *fpq = &fud->pq;

		spin_lock(&fpq->lock);
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_tail_init(&fpq->processing[i], &to_queue);
		spin_unlock(&fpq->lock);
	}
	spin_unlock(&fc->lock);

	list_for_each_entry_safe(req, next, &to_queue, list) {
		set_bit(FR_PENDING, &req->flags);
		clear_bit(FR_SENT, &req->flags);
		/* mark the request as resend request */
		req->in.h.unique |= FUSE_UNIQUE_RESEND;
	}

	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		list_for_each_entry(req, &to_queue, list)
			clear_bit(FR_PENDING, &req->flags);
		fuse_dev_end_requests(&to_queue);
		return;
	}
	/* iq and pq requests are both oldest to newest */
	list_splice(&to_queue, &fiq->pending);
	fuse_dev_wake_and_unlock(fiq);
}

static int fuse_notify_resend(struct fuse_conn *fc)
{
	fuse_resend(fc);
	return 0;
}

/*
 * Increments the fuse connection epoch. This results in dentries from
 * previous epochs being invalidated.
 */
static int fuse_notify_inc_epoch(struct fuse_conn *fc)
{
	atomic_inc(&fc->epoch);

	return 0;
}

static int fuse_notify_prune(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_prune_out outarg;
	const unsigned int batch = 512;
	u64 *nodeids __free(kfree) = kmalloc(sizeof(u64) * batch, GFP_KERNEL);
	unsigned int num, i;
	int err;

	if (!nodeids)
		return -ENOMEM;

	if (size < sizeof(outarg))
		return -EINVAL;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		return err;

	if (size - sizeof(outarg) != outarg.count * sizeof(u64))
		return -EINVAL;

	for (; outarg.count; outarg.count -= num) {
		num = min(batch, outarg.count);
		err = fuse_copy_one(cs, nodeids, num * sizeof(u64));
		if (err)
			return err;

		scoped_guard(rwsem_read, &fc->killsb) {
			for (i = 0; i < num; i++)
				fuse_try_prune_one_inode(fc, nodeids[i]);
		}
	}
	return 0;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/*
	 * Only allow notifications while the connection is in an
	 * initialized and connected state
	 */
	if (!fc->initialized || !fc->connected)
		return -EINVAL;

	/* Don't try to move folios (yet) */
	cs->move_folios = false;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return
fuse_notify_retrieve(fc, size, cs); 2118 2119 case FUSE_NOTIFY_DELETE: 2120 return fuse_notify_delete(fc, size, cs); 2121 2122 case FUSE_NOTIFY_RESEND: 2123 return fuse_notify_resend(fc); 2124 2125 case FUSE_NOTIFY_INC_EPOCH: 2126 return fuse_notify_inc_epoch(fc); 2127 2128 case FUSE_NOTIFY_PRUNE: 2129 return fuse_notify_prune(fc, size, cs); 2130 2131 default: 2132 return -EINVAL; 2133 } 2134 } 2135 2136 /* Look up request on processing list by unique ID */ 2137 struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique) 2138 { 2139 unsigned int hash = fuse_req_hash(unique); 2140 struct fuse_req *req; 2141 2142 list_for_each_entry(req, &fpq->processing[hash], list) { 2143 if (req->in.h.unique == unique) 2144 return req; 2145 } 2146 return NULL; 2147 } 2148 2149 int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, 2150 unsigned nbytes) 2151 { 2152 2153 unsigned int reqsize = 0; 2154 2155 /* 2156 * Uring has all headers separated from args - args is payload only 2157 */ 2158 if (!cs->is_uring) 2159 reqsize = sizeof(struct fuse_out_header); 2160 2161 reqsize += fuse_len_args(args->out_numargs, args->out_args); 2162 2163 if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar)) 2164 return -EINVAL; 2165 else if (reqsize > nbytes) { 2166 struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1]; 2167 unsigned diffsize = reqsize - nbytes; 2168 2169 if (diffsize > lastarg->size) 2170 return -EINVAL; 2171 lastarg->size -= diffsize; 2172 } 2173 return fuse_copy_args(cs, args->out_numargs, args->out_pages, 2174 args->out_args, args->page_zeroing); 2175 } 2176 2177 /* 2178 * Write a single reply to a request. First the header is copied from 2179 * the write buffer. The request is then searched on the processing 2180 * list by the unique ID found in the header. If found, then remove 2181 * it from the list and copy the rest of the buffer to the request. 2182 * The request is finished by calling fuse_request_end(). 2183 */ 2184 static ssize_t fuse_dev_do_write(struct fuse_dev *fud, 2185 struct fuse_copy_state *cs, size_t nbytes) 2186 { 2187 int err; 2188 struct fuse_conn *fc = fud->fc; 2189 struct fuse_pqueue *fpq = &fud->pq; 2190 struct fuse_req *req; 2191 struct fuse_out_header oh; 2192 2193 err = -EINVAL; 2194 if (nbytes < sizeof(struct fuse_out_header)) 2195 goto out; 2196 2197 err = fuse_copy_one(cs, &oh, sizeof(oh)); 2198 if (err) 2199 goto copy_finish; 2200 2201 err = -EINVAL; 2202 if (oh.len != nbytes) 2203 goto copy_finish; 2204 2205 /* 2206 * Zero oh.unique indicates unsolicited notification message 2207 * and error contains notification code. 2208 */ 2209 if (!oh.unique) { 2210 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); 2211 goto copy_finish; 2212 } 2213 2214 err = -EINVAL; 2215 if (oh.error <= -512 || oh.error > 0) 2216 goto copy_finish; 2217 2218 spin_lock(&fpq->lock); 2219 req = NULL; 2220 if (fpq->connected) 2221 req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); 2222 2223 err = -ENOENT; 2224 if (!req) { 2225 spin_unlock(&fpq->lock); 2226 goto copy_finish; 2227 } 2228 2229 /* Is it an interrupt reply ID? 
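 * (Interrupt replies reuse the interrupted request's unique ID with
 * FUSE_INT_REQ_BIT set; see fuse_read_interrupt().)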
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto copy_finish;
	}

	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_folios = false;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = fuse_copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = __fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!user_backed_iter(from))
		return -EINVAL;

	fuse_copy_init(&cs, false, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = __fuse_get_dev(out);
	size_t rem;
	ssize_t ret;

	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	head = pipe->head;
	tail = pipe->tail;
	count = pipe_occupancy(head, tail);

	bufs = kvmalloc_objs(struct pipe_buffer, count);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	nbuf = 0;
	rem = 0;
	for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++)
		rem += pipe_buf(pipe, idx)->len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || pipe_empty(head, tail)))
			goto out_free;

		ibuf = pipe_buf(pipe, tail);
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, false, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_folios = true;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}
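/*
 * Illustrative userspace sketch (assumed daemon-side code, not part of this
 * file): a server that already has a reply sitting in a pipe can splice it
 * into the device instead of calling write().  Passing SPLICE_F_MOVE asks
 * fuse_dev_splice_write() above to set cs.move_folios, so that whole pipe
 * buffers may be moved into the page cache rather than copied.  The fd
 * names are assumptions of this example.
 *
 *	ssize_t n = splice(pipe_rd_fd, NULL, fuse_dev_fd, NULL,
 *			   reply_len, SPLICE_F_MOVE);
 *	if (n < 0)
 *		perror("splice");
 */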
static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (IS_ERR(fud))
		return EPOLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->lock);
	if (!fiq->connected)
		mask = EPOLLERR;
	else if (request_pending(fiq))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&fiq->lock);

	return mask;
}

/* Abort all requests on the given list (pending or processing) */
void fuse_dev_end_requests(struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;

		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		fuse_request_end(req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;

		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}
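/*
 * Illustrative userspace sketch (assumed daemon-side code): the poll
 * semantics implemented by fuse_dev_poll() above let a server wait for the
 * next request instead of blocking in read().  POLLIN is reported once a
 * request is pending; POLLERR means the connection has gone away.
 * handle_disconnect() is a hypothetical helper of this example.
 *
 *	struct pollfd pfd = { .fd = fuse_dev_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1) {
 *		if (pfd.revents & POLLIN)
 *			read(fuse_dev_fd, buf, bufsize); // fetch next request
 *		else if (pfd.revents & POLLERR)
 *			handle_disconnect();
 *	}
 */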
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		if (fc->timeout.req_timeout)
			cancel_delayed_work(&fc->timeout.work);

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		fuse_dev_end_requests(&to_end);

		/*
		 * fc->lock must not be taken to avoid conflicts with io-uring
		 * locks
		 */
		fuse_uring_abort(fc);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);

	fuse_uring_wait_stopped_queues(fc);
}
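/*
 * Aside from the daemon exiting (fuse_dev_release() below aborts the
 * connection when the last device fd is closed), an administrator can
 * trigger fuse_abort_conn() by hand through sysfs.  Illustrative sketch
 * (the connection number comes from fc->dev, also shown in fdinfo; the
 * path construction and variable names are assumptions of this comment):
 *
 *	char path[64];
 *
 *	snprintf(path, sizeof(path),
 *		 "/sys/fs/fuse/connections/%u/abort", conn_id);
 *	int fd = open(path, O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */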
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_file_to_fud(file);
	/* Pairs with cmpxchg() in fuse_dev_install() */
	struct fuse_conn *fc = xchg(&fud->fc, FUSE_DEV_FC_DISCONNECTED);

	if (fc) {
		struct fuse_pqueue *fpq = &fud->pq;
		LIST_HEAD(to_end);
		unsigned int i;
		bool last;

		spin_lock(&fpq->lock);
		WARN_ON(!list_empty(&fpq->io));
		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
			list_splice_init(&fpq->processing[i], &to_end);
		spin_unlock(&fpq->lock);

		fuse_dev_end_requests(&to_end);

		spin_lock(&fc->lock);
		list_del(&fud->entry);
		/* Are we the last open device? */
		last = list_empty(&fc->devices);
		spin_unlock(&fc->lock);

		if (last) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_conn_put(fc);
	}
	fuse_dev_put(fud);
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (IS_ERR(fud))
		return PTR_ERR(fud);

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
{
	int oldfd;
	struct fuse_dev *fud, *new_fud;

	if (get_user(oldfd, argp))
		return -EFAULT;

	CLASS(fd, f)(oldfd);
	if (fd_empty(f))
		return -EINVAL;

	/*
	 * Check against file->f_op because CUSE
	 * uses the same ioctl handler.
	 */
	if (fd_file(f)->f_op != file->f_op)
		return -EINVAL;

	fud = fuse_get_dev(fd_file(f));
	if (IS_ERR(fud))
		return PTR_ERR(fud);

	new_fud = fuse_file_to_fud(file);
	if (fuse_dev_fc_get(new_fud))
		return -EINVAL;

	fuse_dev_install(new_fud, fud->fc);

	return 0;
}

static long fuse_dev_ioctl_backing_open(struct file *file,
					struct fuse_backing_map __user *argp)
{
	struct fuse_dev *fud = fuse_get_dev(file);
	struct fuse_backing_map map;

	if (IS_ERR(fud))
		return PTR_ERR(fud);

	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		return -EOPNOTSUPP;

	if (copy_from_user(&map, argp, sizeof(map)))
		return -EFAULT;

	return fuse_backing_open(fud->fc, &map);
}

static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
{
	struct fuse_dev *fud = fuse_get_dev(file);
	int backing_id;

	if (IS_ERR(fud))
		return PTR_ERR(fud);

	if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
		return -EOPNOTSUPP;

	if (get_user(backing_id, argp))
		return -EFAULT;

	return fuse_backing_close(fud->fc, backing_id);
}

static long fuse_dev_ioctl_sync_init(struct file *file)
{
	int err = -EINVAL;
	struct fuse_dev *fud = fuse_file_to_fud(file);

	mutex_lock(&fuse_mutex);
	if (!fuse_dev_fc_get(fud)) {
		fud->sync_init = true;
		err = 0;
	}
	mutex_unlock(&fuse_mutex);
	return err;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:
		return fuse_dev_ioctl_clone(file, argp);

	case FUSE_DEV_IOC_BACKING_OPEN:
		return fuse_dev_ioctl_backing_open(file, argp);

	case FUSE_DEV_IOC_BACKING_CLOSE:
		return fuse_dev_ioctl_backing_close(file, argp);

	case FUSE_DEV_IOC_SYNC_INIT:
		return fuse_dev_ioctl_sync_init(file);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_PROC_FS
static void fuse_dev_show_fdinfo(struct seq_file *seq, struct file *file)
{
	struct fuse_dev *fud = __fuse_get_dev(file);

	if (!fud)
		return;

	seq_printf(seq, "fuse_connection:\t%u\n", fud->fc->dev);
}
#endif
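/*
 * Illustrative userspace sketch for the clone ioctl handled by
 * fuse_dev_ioctl_clone() above (assumed daemon-side code): a multi-threaded
 * server opens additional /dev/fuse fds and attaches them to an existing
 * connection, giving each worker its own processing queue.  session_fd is a
 * hypothetical name for an fd that already belongs to a mounted connection.
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t src_fd = session_fd;
 *
 *	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &src_fd) == -1)
 *		perror("FUSE_DEV_IOC_CLONE");
 */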
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
#ifdef CONFIG_FUSE_IO_URING
	.uring_cmd	= fuse_uring_cmd,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= fuse_dev_show_fdinfo,
#endif
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;

	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}