1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "dev_uring_i.h"
10 #include "fuse_i.h"
11 #include "fuse_dev_i.h"
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/poll.h>
16 #include <linux/sched/signal.h>
17 #include <linux/uio.h>
18 #include <linux/miscdevice.h>
19 #include <linux/pagemap.h>
20 #include <linux/file.h>
21 #include <linux/slab.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/swap.h>
24 #include <linux/splice.h>
25 #include <linux/sched.h>
26
27 #define CREATE_TRACE_POINTS
28 #include "fuse_trace.h"
29
30 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
31 MODULE_ALIAS("devname:fuse");
32
33 static struct kmem_cache *fuse_req_cachep;
34
35 const unsigned long fuse_timeout_timer_freq =
36 secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
37
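/*
 * Return true if the oldest request on @list has been waiting longer than
 * the connection's configured request timeout.  Only the head of the list
 * is checked; see the comment above fuse_check_timeout().
 */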
38 bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
39 {
40 struct fuse_req *req;
41
42 req = list_first_entry_or_null(list, struct fuse_req, list);
43 if (!req)
44 return false;
45 return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
46 }
47
48 bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
49 {
50 int i;
51
52 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
53 if (fuse_request_expired(fc, &processing[i]))
54 return true;
55
56 return false;
57 }
58
59 /*
60 * Check if any requests aren't being completed by the time the request timeout
61 * elapses. To do so, we:
62 * - check the fiq pending list
63 * - check the bg queue
64 * - check the fpq io and processing lists
65 *
66 * To make this fast, we only check against the head request on each list since
67 * these are generally queued in order of creation time (eg newer requests get
68 * queued to the tail). We might miss a few edge cases (eg requests transitioning
69 * between lists, re-sent requests at the head of the pending list having a
70 * later creation time than other requests on that list, etc.) but that is fine
71 * since if the request never gets fulfilled, it will eventually be caught.
72 */
73 void fuse_check_timeout(struct work_struct *work)
74 {
75 struct delayed_work *dwork = to_delayed_work(work);
76 struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
77 timeout.work);
78 struct fuse_iqueue *fiq = &fc->iq;
79 struct fuse_dev *fud;
80 struct fuse_pqueue *fpq;
81 bool expired = false;
82
83 if (!atomic_read(&fc->num_waiting))
84 goto out;
85
86 spin_lock(&fiq->lock);
87 expired = fuse_request_expired(fc, &fiq->pending);
88 spin_unlock(&fiq->lock);
89 if (expired)
90 goto abort_conn;
91
92 spin_lock(&fc->bg_lock);
93 expired = fuse_request_expired(fc, &fc->bg_queue);
94 spin_unlock(&fc->bg_lock);
95 if (expired)
96 goto abort_conn;
97
98 spin_lock(&fc->lock);
99 if (!fc->connected) {
100 spin_unlock(&fc->lock);
101 return;
102 }
103 list_for_each_entry(fud, &fc->devices, entry) {
104 fpq = &fud->pq;
105 spin_lock(&fpq->lock);
106 if (fuse_request_expired(fc, &fpq->io) ||
107 fuse_fpq_processing_expired(fc, fpq->processing)) {
108 spin_unlock(&fpq->lock);
109 spin_unlock(&fc->lock);
110 goto abort_conn;
111 }
112
113 spin_unlock(&fpq->lock);
114 }
115 spin_unlock(&fc->lock);
116
117 if (fuse_uring_request_expired(fc))
118 goto abort_conn;
119
120 out:
121 queue_delayed_work(system_wq, &fc->timeout.work,
122 fuse_timeout_timer_freq);
123 return;
124
125 abort_conn:
126 fuse_abort_conn(fc);
127 }
128
129 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
130 {
131 INIT_LIST_HEAD(&req->list);
132 INIT_LIST_HEAD(&req->intr_entry);
133 init_waitqueue_head(&req->waitq);
134 refcount_set(&req->count, 1);
135 __set_bit(FR_PENDING, &req->flags);
136 req->fm = fm;
137 req->create_time = jiffies;
138 }
139
140 static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
141 {
142 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
143 if (req)
144 fuse_request_init(fm, req);
145
146 return req;
147 }
148
149 static void fuse_request_free(struct fuse_req *req)
150 {
151 kmem_cache_free(fuse_req_cachep, req);
152 }
153
154 static void __fuse_get_request(struct fuse_req *req)
155 {
156 refcount_inc(&req->count);
157 }
158
159 /* Must be called with > 1 refcount */
160 static void __fuse_put_request(struct fuse_req *req)
161 {
162 refcount_dec(&req->count);
163 }
164
165 void fuse_set_initialized(struct fuse_conn *fc)
166 {
167 /* Make sure stores before this are seen on another CPU */
168 smp_wmb();
169 fc->initialized = 1;
170 }
171
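/*
 * Decide whether request allocation has to wait: before the INIT reply has
 * been processed, while background requests are throttled (fc->blocked), or
 * while the io-uring queues are not yet ready.
 */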
172 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
173 {
174 return !fc->initialized || (for_background && fc->blocked) ||
175 (fc->io_uring && fc->connected && !fuse_uring_ready(fc));
176 }
177
178 static void fuse_drop_waiting(struct fuse_conn *fc)
179 {
180 /*
181 * lockless check of fc->connected is okay, because atomic_dec_and_test()
182 * provides a memory barrier matched with the one in fuse_wait_aborted()
183 * to ensure no wake-up is missed.
184 */
185 if (atomic_dec_and_test(&fc->num_waiting) &&
186 !READ_ONCE(fc->connected)) {
187 /* wake up aborters */
188 wake_up_all(&fc->blocked_waitq);
189 }
190 }
191
192 static void fuse_put_request(struct fuse_req *req);
193
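/*
 * Allocate a request and fill in the credentials (uid/gid/pid) that are sent
 * to the server.  May block until the connection has been initialized or,
 * for background requests, until the connection is unblocked.
 */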
194 static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
195 struct fuse_mount *fm,
196 bool for_background)
197 {
198 struct fuse_conn *fc = fm->fc;
199 struct fuse_req *req;
200 bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP);
201 kuid_t fsuid;
202 kgid_t fsgid;
203 int err;
204
205 atomic_inc(&fc->num_waiting);
206
207 if (fuse_block_alloc(fc, for_background)) {
208 err = -EINTR;
209 if (wait_event_killable_exclusive(fc->blocked_waitq,
210 !fuse_block_alloc(fc, for_background)))
211 goto out;
212 }
213 /* Matches smp_wmb() in fuse_set_initialized() */
214 smp_rmb();
215
216 err = -ENOTCONN;
217 if (!fc->connected)
218 goto out;
219
220 err = -ECONNREFUSED;
221 if (fc->conn_error)
222 goto out;
223
224 req = fuse_request_alloc(fm, GFP_KERNEL);
225 err = -ENOMEM;
226 if (!req) {
227 if (for_background)
228 wake_up(&fc->blocked_waitq);
229 goto out;
230 }
231
232 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
233
234 __set_bit(FR_WAITING, &req->flags);
235 if (for_background)
236 __set_bit(FR_BACKGROUND, &req->flags);
237
238 /*
239 * Keep the old behavior when idmappings support was not
240 * declared by a FUSE server.
241 *
242 * For those FUSE servers who support idmapped mounts,
243 * we send UID/GID only along with "inode creation"
244 * fuse requests, otherwise idmap == &invalid_mnt_idmap and
245 * req->in.h.{u,g}id will be equal to FUSE_INVALID_UIDGID.
246 */
247 fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
248 fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
249 req->in.h.uid = from_kuid(fc->user_ns, fsuid);
250 req->in.h.gid = from_kgid(fc->user_ns, fsgid);
251
252 if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
253 req->in.h.gid == ((gid_t)-1))) {
254 fuse_put_request(req);
255 return ERR_PTR(-EOVERFLOW);
256 }
257
258 return req;
259
260 out:
261 fuse_drop_waiting(fc);
262 return ERR_PTR(err);
263 }
264
265 static void fuse_put_request(struct fuse_req *req)
266 {
267 struct fuse_conn *fc = req->fm->fc;
268
269 if (refcount_dec_and_test(&req->count)) {
270 if (test_bit(FR_BACKGROUND, &req->flags)) {
271 /*
272 * We get here in the unlikely case that a background
273 * request was allocated but not sent
274 */
275 spin_lock(&fc->bg_lock);
276 if (!fc->blocked)
277 wake_up(&fc->blocked_waitq);
278 spin_unlock(&fc->bg_lock);
279 }
280
281 if (test_bit(FR_WAITING, &req->flags)) {
282 __clear_bit(FR_WAITING, &req->flags);
283 fuse_drop_waiting(fc);
284 }
285
286 fuse_request_free(req);
287 }
288 }
289
290 unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
291 {
292 unsigned nbytes = 0;
293 unsigned i;
294
295 for (i = 0; i < numargs; i++)
296 nbytes += args[i].size;
297
298 return nbytes;
299 }
300 EXPORT_SYMBOL_GPL(fuse_len_args);
301
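/*
 * Allocate the next unique request ID.  IDs advance by FUSE_REQ_ID_STEP so
 * that the low bit stays free for FUSE_INT_REQ_BIT (interrupt requests).
 * Caller must hold fiq->lock.
 */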
302 static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
303 {
304 fiq->reqctr += FUSE_REQ_ID_STEP;
305 return fiq->reqctr;
306 }
307
308 u64 fuse_get_unique(struct fuse_iqueue *fiq)
309 {
310 u64 ret;
311
312 spin_lock(&fiq->lock);
313 ret = fuse_get_unique_locked(fiq);
314 spin_unlock(&fiq->lock);
315
316 return ret;
317 }
318 EXPORT_SYMBOL_GPL(fuse_get_unique);
319
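/* Hash a request ID into the processing queue table, ignoring the interrupt bit */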
320 unsigned int fuse_req_hash(u64 unique)
321 {
322 return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
323 }
324
325 /*
326 * A new request is available, wake fiq->waitq
327 */
328 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
329 __releases(fiq->lock)
330 {
331 wake_up(&fiq->waitq);
332 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
333 spin_unlock(&fiq->lock);
334 }
335
336 void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
337 struct fuse_forget_link *forget)
338 {
339 spin_lock(&fiq->lock);
340 if (fiq->connected) {
341 fiq->forget_list_tail->next = forget;
342 fiq->forget_list_tail = forget;
343 fuse_dev_wake_and_unlock(fiq);
344 } else {
345 kfree(forget);
346 spin_unlock(&fiq->lock);
347 }
348 }
349
350 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
351 {
352 spin_lock(&fiq->lock);
353 if (list_empty(&req->intr_entry)) {
354 list_add_tail(&req->intr_entry, &fiq->interrupts);
355 /*
356 * Pairs with smp_mb() implied by test_and_set_bit()
357 * from fuse_request_end().
358 */
359 smp_mb();
360 if (test_bit(FR_FINISHED, &req->flags)) {
361 list_del_init(&req->intr_entry);
362 spin_unlock(&fiq->lock);
363 } else {
364 fuse_dev_wake_and_unlock(fiq);
365 }
366 } else {
367 spin_unlock(&fiq->lock);
368 }
369 }
370
371 static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
372 {
373 spin_lock(&fiq->lock);
374 if (fiq->connected) {
375 if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
376 req->in.h.unique = fuse_get_unique_locked(fiq);
377 list_add_tail(&req->list, &fiq->pending);
378 fuse_dev_wake_and_unlock(fiq);
379 } else {
380 spin_unlock(&fiq->lock);
381 req->out.h.error = -ENOTCONN;
382 clear_bit(FR_PENDING, &req->flags);
383 fuse_request_end(req);
384 }
385 }
386
387 const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
388 .send_forget = fuse_dev_queue_forget,
389 .send_interrupt = fuse_dev_queue_interrupt,
390 .send_req = fuse_dev_queue_req,
391 };
392 EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
393
394 static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
395 {
396 req->in.h.len = sizeof(struct fuse_in_header) +
397 fuse_len_args(req->args->in_numargs,
398 (struct fuse_arg *) req->args->in_args);
399 trace_fuse_request_send(req);
400 fiq->ops->send_req(fiq, req);
401 }
402
403 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
404 u64 nodeid, u64 nlookup)
405 {
406 struct fuse_iqueue *fiq = &fc->iq;
407
408 forget->forget_one.nodeid = nodeid;
409 forget->forget_one.nlookup = nlookup;
410
411 fiq->ops->send_forget(fiq, forget);
412 }
413
414 static void flush_bg_queue(struct fuse_conn *fc)
415 {
416 struct fuse_iqueue *fiq = &fc->iq;
417
418 while (fc->active_background < fc->max_background &&
419 !list_empty(&fc->bg_queue)) {
420 struct fuse_req *req;
421
422 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
423 list_del(&req->list);
424 fc->active_background++;
425 fuse_send_one(fiq, req);
426 }
427 }
428
429 /*
430 * This function is called when a request is finished. Either a reply
431 * has arrived or it was aborted (and not yet sent) or some error
432 * occurred during communication with userspace, or the device file
433 * was closed. The requester thread is woken up (if still waiting),
434 * the 'end' callback is called if given, else the reference to the
435 * request is released
436 */
437 void fuse_request_end(struct fuse_req *req)
438 {
439 struct fuse_mount *fm = req->fm;
440 struct fuse_conn *fc = fm->fc;
441 struct fuse_iqueue *fiq = &fc->iq;
442
443 if (test_and_set_bit(FR_FINISHED, &req->flags))
444 goto put_request;
445
446 trace_fuse_request_end(req);
447 /*
448 * test_and_set_bit() implies smp_mb() between bit
449 * changing and below FR_INTERRUPTED check. Pairs with
450 * smp_mb() from queue_interrupt().
451 */
452 if (test_bit(FR_INTERRUPTED, &req->flags)) {
453 spin_lock(&fiq->lock);
454 list_del_init(&req->intr_entry);
455 spin_unlock(&fiq->lock);
456 }
457 WARN_ON(test_bit(FR_PENDING, &req->flags));
458 WARN_ON(test_bit(FR_SENT, &req->flags));
459 if (test_bit(FR_BACKGROUND, &req->flags)) {
460 spin_lock(&fc->bg_lock);
461 clear_bit(FR_BACKGROUND, &req->flags);
462 if (fc->num_background == fc->max_background) {
463 fc->blocked = 0;
464 wake_up(&fc->blocked_waitq);
465 } else if (!fc->blocked) {
466 /*
467 * Wake up next waiter, if any. It's okay to use
468 * waitqueue_active(), as we've already synced up
469 * fc->blocked with waiters with the wake_up() call
470 * above.
471 */
472 if (waitqueue_active(&fc->blocked_waitq))
473 wake_up(&fc->blocked_waitq);
474 }
475
476 fc->num_background--;
477 fc->active_background--;
478 flush_bg_queue(fc);
479 spin_unlock(&fc->bg_lock);
480 } else {
481 /* Wake up waiter sleeping in request_wait_answer() */
482 wake_up(&req->waitq);
483 }
484
485 if (test_bit(FR_ASYNC, &req->flags))
486 req->args->end(fm, req->args, req->out.h.error);
487 put_request:
488 fuse_put_request(req);
489 }
490 EXPORT_SYMBOL_GPL(fuse_request_end);
491
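/*
 * Forward an interrupt for an already interrupted request to the server
 * via the input queue ops.
 */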
492 static int queue_interrupt(struct fuse_req *req)
493 {
494 struct fuse_iqueue *fiq = &req->fm->fc->iq;
495
496 /* Check that the request has actually been marked as interrupted */
497 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
498 return -EINVAL;
499
500 fiq->ops->send_interrupt(fiq, req);
501
502 return 0;
503 }
504
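/*
 * Take @req off its pending list if the server has not picked it up yet.
 * On success the request is failed with -EINTR and true is returned.
 */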
505 bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
506 {
507 spin_lock(lock);
508 if (test_bit(FR_PENDING, &req->flags)) {
509 /*
510 * FR_PENDING does not get cleared as the request will end
511 * up in destruction anyway.
512 */
513 list_del(&req->list);
514 spin_unlock(lock);
515 __fuse_put_request(req);
516 req->out.h.error = -EINTR;
517 return true;
518 }
519 spin_unlock(lock);
520 return false;
521 }
522
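/*
 * Wait for the answer in up to three stages: interruptible by any signal
 * (queuing a FUSE_INTERRUPT if the request was already sent), then killable
 * only (removing the request from the pending queue if possible), and
 * finally uninterruptible for forced or already-sent requests.
 */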
523 static void request_wait_answer(struct fuse_req *req)
524 {
525 struct fuse_conn *fc = req->fm->fc;
526 struct fuse_iqueue *fiq = &fc->iq;
527 int err;
528
529 if (!fc->no_interrupt) {
530 /* Any signal may interrupt this */
531 err = wait_event_interruptible(req->waitq,
532 test_bit(FR_FINISHED, &req->flags));
533 if (!err)
534 return;
535
536 set_bit(FR_INTERRUPTED, &req->flags);
537 /* matches barrier in fuse_dev_do_read() */
538 smp_mb__after_atomic();
539 if (test_bit(FR_SENT, &req->flags))
540 queue_interrupt(req);
541 }
542
543 if (!test_bit(FR_FORCE, &req->flags)) {
544 bool removed;
545
546 /* Only fatal signals may interrupt this */
547 err = wait_event_killable(req->waitq,
548 test_bit(FR_FINISHED, &req->flags));
549 if (!err)
550 return;
551
552 if (test_bit(FR_URING, &req->flags))
553 removed = fuse_uring_remove_pending_req(req);
554 else
555 removed = fuse_remove_pending_req(req, &fiq->lock);
556 if (removed)
557 return;
558 }
559
560 /*
561 * Either request is already in userspace, or it was forced.
562 * Wait it out.
563 */
564 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
565 }
566
567 static void __fuse_request_send(struct fuse_req *req)
568 {
569 struct fuse_iqueue *fiq = &req->fm->fc->iq;
570
571 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
572
573 /* acquire extra reference, since request is still needed after
574 fuse_request_end() */
575 __fuse_get_request(req);
576 fuse_send_one(fiq, req);
577
578 request_wait_answer(req);
579 /* Pairs with smp_wmb() in fuse_request_end() */
580 smp_rmb();
581 }
582
583 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
584 {
585 if (fc->minor < 4 && args->opcode == FUSE_STATFS)
586 args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
587
588 if (fc->minor < 9) {
589 switch (args->opcode) {
590 case FUSE_LOOKUP:
591 case FUSE_CREATE:
592 case FUSE_MKNOD:
593 case FUSE_MKDIR:
594 case FUSE_SYMLINK:
595 case FUSE_LINK:
596 args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
597 break;
598 case FUSE_GETATTR:
599 case FUSE_SETATTR:
600 args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
601 break;
602 }
603 }
604 if (fc->minor < 12) {
605 switch (args->opcode) {
606 case FUSE_CREATE:
607 args->in_args[0].size = sizeof(struct fuse_open_in);
608 break;
609 case FUSE_MKNOD:
610 args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
611 break;
612 }
613 }
614 }
615
616 static void fuse_force_creds(struct fuse_req *req)
617 {
618 struct fuse_conn *fc = req->fm->fc;
619
620 if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
621 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
622 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
623 } else {
624 req->in.h.uid = FUSE_INVALID_UIDGID;
625 req->in.h.gid = FUSE_INVALID_UIDGID;
626 }
627
628 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
629 }
630
631 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
632 {
633 req->in.h.opcode = args->opcode;
634 req->in.h.nodeid = args->nodeid;
635 req->args = args;
636 if (args->is_ext)
637 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
638 if (args->end)
639 __set_bit(FR_ASYNC, &req->flags);
640 }
641
642 ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
643 struct fuse_mount *fm,
644 struct fuse_args *args)
645 {
646 struct fuse_conn *fc = fm->fc;
647 struct fuse_req *req;
648 ssize_t ret;
649
650 if (args->force) {
651 atomic_inc(&fc->num_waiting);
652 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
653
654 if (!args->nocreds)
655 fuse_force_creds(req);
656
657 __set_bit(FR_WAITING, &req->flags);
658 __set_bit(FR_FORCE, &req->flags);
659 } else {
660 WARN_ON(args->nocreds);
661 req = fuse_get_req(idmap, fm, false);
662 if (IS_ERR(req))
663 return PTR_ERR(req);
664 }
665
666 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
667 fuse_adjust_compat(fc, args);
668 fuse_args_to_req(req, args);
669
670 if (!args->noreply)
671 __set_bit(FR_ISREPLY, &req->flags);
672 __fuse_request_send(req);
673 ret = req->out.h.error;
674 if (!ret && args->out_argvar) {
675 BUG_ON(args->out_numargs == 0);
676 ret = args->out_args[args->out_numargs - 1].size;
677 }
678 fuse_put_request(req);
679
680 return ret;
681 }
682
683 #ifdef CONFIG_FUSE_IO_URING
684 static bool fuse_request_queue_background_uring(struct fuse_conn *fc,
685 struct fuse_req *req)
686 {
687 struct fuse_iqueue *fiq = &fc->iq;
688
689 req->in.h.unique = fuse_get_unique(fiq);
690 req->in.h.len = sizeof(struct fuse_in_header) +
691 fuse_len_args(req->args->in_numargs,
692 (struct fuse_arg *) req->args->in_args);
693
694 return fuse_uring_queue_bq_req(req);
695 }
696 #endif
697
698 /*
699 * @return true if queued
700 */
701 static int fuse_request_queue_background(struct fuse_req *req)
702 {
703 struct fuse_mount *fm = req->fm;
704 struct fuse_conn *fc = fm->fc;
705 bool queued = false;
706
707 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
708 if (!test_bit(FR_WAITING, &req->flags)) {
709 __set_bit(FR_WAITING, &req->flags);
710 atomic_inc(&fc->num_waiting);
711 }
712 __set_bit(FR_ISREPLY, &req->flags);
713
714 #ifdef CONFIG_FUSE_IO_URING
715 if (fuse_uring_ready(fc))
716 return fuse_request_queue_background_uring(fc, req);
717 #endif
718
719 spin_lock(&fc->bg_lock);
720 if (likely(fc->connected)) {
721 fc->num_background++;
722 if (fc->num_background == fc->max_background)
723 fc->blocked = 1;
724 list_add_tail(&req->list, &fc->bg_queue);
725 flush_bg_queue(fc);
726 queued = true;
727 }
728 spin_unlock(&fc->bg_lock);
729
730 return queued;
731 }
732
733 int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
734 gfp_t gfp_flags)
735 {
736 struct fuse_req *req;
737
738 if (args->force) {
739 WARN_ON(!args->nocreds);
740 req = fuse_request_alloc(fm, gfp_flags);
741 if (!req)
742 return -ENOMEM;
743 __set_bit(FR_BACKGROUND, &req->flags);
744 } else {
745 WARN_ON(args->nocreds);
746 req = fuse_get_req(&invalid_mnt_idmap, fm, true);
747 if (IS_ERR(req))
748 return PTR_ERR(req);
749 }
750
751 fuse_args_to_req(req, args);
752
753 if (!fuse_request_queue_background(req)) {
754 fuse_put_request(req);
755 return -ENOTCONN;
756 }
757
758 return 0;
759 }
760 EXPORT_SYMBOL_GPL(fuse_simple_background);
761
762 static int fuse_simple_notify_reply(struct fuse_mount *fm,
763 struct fuse_args *args, u64 unique)
764 {
765 struct fuse_req *req;
766 struct fuse_iqueue *fiq = &fm->fc->iq;
767
768 req = fuse_get_req(&invalid_mnt_idmap, fm, false);
769 if (IS_ERR(req))
770 return PTR_ERR(req);
771
772 __clear_bit(FR_ISREPLY, &req->flags);
773 req->in.h.unique = unique;
774
775 fuse_args_to_req(req, args);
776
777 fuse_send_one(fiq, req);
778
779 return 0;
780 }
781
782 /*
783 * Lock the request. Up to the next unlock_request() there mustn't be
784 * anything that could cause a page-fault. If the request was already
785 * aborted bail out.
786 */
787 static int lock_request(struct fuse_req *req)
788 {
789 int err = 0;
790 if (req) {
791 spin_lock(&req->waitq.lock);
792 if (test_bit(FR_ABORTED, &req->flags))
793 err = -ENOENT;
794 else
795 set_bit(FR_LOCKED, &req->flags);
796 spin_unlock(&req->waitq.lock);
797 }
798 return err;
799 }
800
801 /*
802 * Unlock request. If it was aborted while locked, caller is responsible
803 * for unlocking and ending the request.
804 */
805 static int unlock_request(struct fuse_req *req)
806 {
807 int err = 0;
808 if (req) {
809 spin_lock(&req->waitq.lock);
810 if (test_bit(FR_ABORTED, &req->flags))
811 err = -ENOENT;
812 else
813 clear_bit(FR_LOCKED, &req->flags);
814 spin_unlock(&req->waitq.lock);
815 }
816 return err;
817 }
818
819 void fuse_copy_init(struct fuse_copy_state *cs, int write,
820 struct iov_iter *iter)
821 {
822 memset(cs, 0, sizeof(*cs));
823 cs->write = write;
824 cs->iter = iter;
825 }
826
827 /* Unmap and put previous page of userspace buffer */
828 static void fuse_copy_finish(struct fuse_copy_state *cs)
829 {
830 if (cs->currbuf) {
831 struct pipe_buffer *buf = cs->currbuf;
832
833 if (cs->write)
834 buf->len = PAGE_SIZE - cs->len;
835 cs->currbuf = NULL;
836 } else if (cs->pg) {
837 if (cs->write) {
838 flush_dcache_page(cs->pg);
839 set_page_dirty_lock(cs->pg);
840 }
841 put_page(cs->pg);
842 }
843 cs->pg = NULL;
844 }
845
846 /*
847 * Get another pageful of userspace buffer, map it into kernel
848 * address space, and lock the request
849 */
850 static int fuse_copy_fill(struct fuse_copy_state *cs)
851 {
852 struct page *page;
853 int err;
854
855 err = unlock_request(cs->req);
856 if (err)
857 return err;
858
859 fuse_copy_finish(cs);
860 if (cs->pipebufs) {
861 struct pipe_buffer *buf = cs->pipebufs;
862
863 if (!cs->write) {
864 err = pipe_buf_confirm(cs->pipe, buf);
865 if (err)
866 return err;
867
868 BUG_ON(!cs->nr_segs);
869 cs->currbuf = buf;
870 cs->pg = buf->page;
871 cs->offset = buf->offset;
872 cs->len = buf->len;
873 cs->pipebufs++;
874 cs->nr_segs--;
875 } else {
876 if (cs->nr_segs >= cs->pipe->max_usage)
877 return -EIO;
878
879 page = alloc_page(GFP_HIGHUSER);
880 if (!page)
881 return -ENOMEM;
882
883 buf->page = page;
884 buf->offset = 0;
885 buf->len = 0;
886
887 cs->currbuf = buf;
888 cs->pg = page;
889 cs->offset = 0;
890 cs->len = PAGE_SIZE;
891 cs->pipebufs++;
892 cs->nr_segs++;
893 }
894 } else {
895 size_t off;
896 err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
897 if (err < 0)
898 return err;
899 BUG_ON(!err);
900 cs->len = err;
901 cs->offset = off;
902 cs->pg = page;
903 }
904
905 return lock_request(cs->req);
906 }
907
908 /* Do as much copy to/from userspace buffer as we can */
909 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
910 {
911 unsigned ncpy = min(*size, cs->len);
912 if (val) {
913 void *pgaddr = kmap_local_page(cs->pg);
914 void *buf = pgaddr + cs->offset;
915
916 if (cs->write)
917 memcpy(buf, *val, ncpy);
918 else
919 memcpy(*val, buf, ncpy);
920
921 kunmap_local(pgaddr);
922 *val += ncpy;
923 }
924 *size -= ncpy;
925 cs->len -= ncpy;
926 cs->offset += ncpy;
927 if (cs->is_uring)
928 cs->ring.copied_sz += ncpy;
929
930 return ncpy;
931 }
932
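/*
 * Sanity check a folio stolen from the pipe before it is inserted into the
 * page cache: it must not be mapped, must not belong to a mapping and must
 * not carry unexpected page flags.
 */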
933 static int fuse_check_folio(struct folio *folio)
934 {
935 if (folio_mapped(folio) ||
936 folio->mapping != NULL ||
937 (folio->flags & PAGE_FLAGS_CHECK_AT_PREP &
938 ~(1 << PG_locked |
939 1 << PG_referenced |
940 1 << PG_lru |
941 1 << PG_active |
942 1 << PG_workingset |
943 1 << PG_reclaim |
944 1 << PG_waiters |
945 LRU_GEN_MASK | LRU_REFS_MASK))) {
946 dump_page(&folio->page, "fuse: trying to steal weird page");
947 return 1;
948 }
949 return 0;
950 }
951
952 /*
953 * Attempt to steal a page from the splice() pipe and move it into the
954 * pagecache. If successful, the pointer in @pagep will be updated. The
955 * folio that was originally in @pagep will lose a reference and the new
956 * folio returned in @pagep will carry a reference.
957 */
958 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
959 {
960 int err;
961 struct folio *oldfolio = page_folio(*pagep);
962 struct folio *newfolio;
963 struct pipe_buffer *buf = cs->pipebufs;
964
965 folio_get(oldfolio);
966 err = unlock_request(cs->req);
967 if (err)
968 goto out_put_old;
969
970 fuse_copy_finish(cs);
971
972 err = pipe_buf_confirm(cs->pipe, buf);
973 if (err)
974 goto out_put_old;
975
976 BUG_ON(!cs->nr_segs);
977 cs->currbuf = buf;
978 cs->len = buf->len;
979 cs->pipebufs++;
980 cs->nr_segs--;
981
982 if (cs->len != PAGE_SIZE)
983 goto out_fallback;
984
985 if (!pipe_buf_try_steal(cs->pipe, buf))
986 goto out_fallback;
987
988 newfolio = page_folio(buf->page);
989
990 folio_clear_uptodate(newfolio);
991 folio_clear_mappedtodisk(newfolio);
992
993 if (fuse_check_folio(newfolio) != 0)
994 goto out_fallback_unlock;
995
996 /*
997 * This is a new and locked page, it shouldn't be mapped or
998 * have any special flags on it
999 */
1000 if (WARN_ON(folio_mapped(oldfolio)))
1001 goto out_fallback_unlock;
1002 if (WARN_ON(folio_has_private(oldfolio)))
1003 goto out_fallback_unlock;
1004 if (WARN_ON(folio_test_dirty(oldfolio) ||
1005 folio_test_writeback(oldfolio)))
1006 goto out_fallback_unlock;
1007 if (WARN_ON(folio_test_mlocked(oldfolio)))
1008 goto out_fallback_unlock;
1009
1010 replace_page_cache_folio(oldfolio, newfolio);
1011
1012 folio_get(newfolio);
1013
1014 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
1015 folio_add_lru(newfolio);
1016
1017 /*
1018 * Release while we have extra ref on stolen page. Otherwise
1019 * anon_pipe_buf_release() might think the page can be reused.
1020 */
1021 pipe_buf_release(cs->pipe, buf);
1022
1023 err = 0;
1024 spin_lock(&cs->req->waitq.lock);
1025 if (test_bit(FR_ABORTED, &cs->req->flags))
1026 err = -ENOENT;
1027 else
1028 *pagep = &newfolio->page;
1029 spin_unlock(&cs->req->waitq.lock);
1030
1031 if (err) {
1032 folio_unlock(newfolio);
1033 folio_put(newfolio);
1034 goto out_put_old;
1035 }
1036
1037 folio_unlock(oldfolio);
1038 /* Drop ref for ap->pages[] array */
1039 folio_put(oldfolio);
1040 cs->len = 0;
1041
1042 err = 0;
1043 out_put_old:
1044 /* Drop ref obtained in this function */
1045 folio_put(oldfolio);
1046 return err;
1047
1048 out_fallback_unlock:
1049 folio_unlock(newfolio);
1050 out_fallback:
1051 cs->pg = buf->page;
1052 cs->offset = buf->offset;
1053
1054 err = lock_request(cs->req);
1055 if (!err)
1056 err = 1;
1057
1058 goto out_put_old;
1059 }
1060
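/*
 * Zero-copy splice path: reference the request page directly from a new
 * pipe buffer instead of copying its contents.
 */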
1061 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
1062 unsigned offset, unsigned count)
1063 {
1064 struct pipe_buffer *buf;
1065 int err;
1066
1067 if (cs->nr_segs >= cs->pipe->max_usage)
1068 return -EIO;
1069
1070 get_page(page);
1071 err = unlock_request(cs->req);
1072 if (err) {
1073 put_page(page);
1074 return err;
1075 }
1076
1077 fuse_copy_finish(cs);
1078
1079 buf = cs->pipebufs;
1080 buf->page = page;
1081 buf->offset = offset;
1082 buf->len = count;
1083
1084 cs->pipebufs++;
1085 cs->nr_segs++;
1086 cs->len = 0;
1087
1088 return 0;
1089 }
1090
1091 /*
1092 * Copy a page in the request to/from the userspace buffer. Must be
1093 * done atomically
1094 */
1095 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
1096 unsigned offset, unsigned count, int zeroing)
1097 {
1098 int err;
1099 struct page *page = *pagep;
1100
1101 if (page && zeroing && count < PAGE_SIZE)
1102 clear_highpage(page);
1103
1104 while (count) {
1105 if (cs->write && cs->pipebufs && page) {
1106 /*
1107 * Can't control lifetime of pipe buffers, so always
1108 * copy user pages.
1109 */
1110 if (cs->req->args->user_pages) {
1111 err = fuse_copy_fill(cs);
1112 if (err)
1113 return err;
1114 } else {
1115 return fuse_ref_page(cs, page, offset, count);
1116 }
1117 } else if (!cs->len) {
1118 if (cs->move_pages && page &&
1119 offset == 0 && count == PAGE_SIZE) {
1120 err = fuse_try_move_page(cs, pagep);
1121 if (err <= 0)
1122 return err;
1123 } else {
1124 err = fuse_copy_fill(cs);
1125 if (err)
1126 return err;
1127 }
1128 }
1129 if (page) {
1130 void *mapaddr = kmap_local_page(page);
1131 void *buf = mapaddr + offset;
1132 offset += fuse_copy_do(cs, &buf, &count);
1133 kunmap_local(mapaddr);
1134 } else
1135 offset += fuse_copy_do(cs, NULL, &count);
1136 }
1137 if (page && !cs->write)
1138 flush_dcache_page(page);
1139 return 0;
1140 }
1141
1142 /* Copy pages in the request to/from userspace buffer */
1143 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1144 int zeroing)
1145 {
1146 unsigned i;
1147 struct fuse_req *req = cs->req;
1148 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1149
1150 for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) {
1151 int err;
1152 unsigned int offset = ap->descs[i].offset;
1153 unsigned int count = min(nbytes, ap->descs[i].length);
1154 struct page *orig, *pagep;
1155
1156 orig = pagep = &ap->folios[i]->page;
1157
1158 err = fuse_copy_page(cs, &pagep, offset, count, zeroing);
1159 if (err)
1160 return err;
1161
1162 nbytes -= count;
1163
1164 /*
1165 * fuse_copy_page may have moved a page from a pipe instead of
1166 * copying into our given page, so update the folios if it was
1167 * replaced.
1168 */
1169 if (pagep != orig)
1170 ap->folios[i] = page_folio(pagep);
1171 }
1172 return 0;
1173 }
1174
1175 /* Copy a single argument in the request to/from userspace buffer */
1176 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1177 {
1178 while (size) {
1179 if (!cs->len) {
1180 int err = fuse_copy_fill(cs);
1181 if (err)
1182 return err;
1183 }
1184 fuse_copy_do(cs, &val, &size);
1185 }
1186 return 0;
1187 }
1188
1189 /* Copy request arguments to/from userspace buffer */
1190 int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1191 unsigned argpages, struct fuse_arg *args,
1192 int zeroing)
1193 {
1194 int err = 0;
1195 unsigned i;
1196
1197 for (i = 0; !err && i < numargs; i++) {
1198 struct fuse_arg *arg = &args[i];
1199 if (i == numargs - 1 && argpages)
1200 err = fuse_copy_pages(cs, arg->size, zeroing);
1201 else
1202 err = fuse_copy_one(cs, arg->value, arg->size);
1203 }
1204 return err;
1205 }
1206
1207 static int forget_pending(struct fuse_iqueue *fiq)
1208 {
1209 return fiq->forget_list_head.next != NULL;
1210 }
1211
1212 static int request_pending(struct fuse_iqueue *fiq)
1213 {
1214 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1215 forget_pending(fiq);
1216 }
1217
1218 /*
1219 * Transfer an interrupt request to userspace
1220 *
1221 * Unlike other requests this is assembled on demand, without a need
1222 * to allocate a separate fuse_req structure.
1223 *
1224 * Called with fiq->lock held, releases it
1225 */
1226 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1227 struct fuse_copy_state *cs,
1228 size_t nbytes, struct fuse_req *req)
1229 __releases(fiq->lock)
1230 {
1231 struct fuse_in_header ih;
1232 struct fuse_interrupt_in arg;
1233 unsigned reqsize = sizeof(ih) + sizeof(arg);
1234 int err;
1235
1236 list_del_init(&req->intr_entry);
1237 memset(&ih, 0, sizeof(ih));
1238 memset(&arg, 0, sizeof(arg));
1239 ih.len = reqsize;
1240 ih.opcode = FUSE_INTERRUPT;
1241 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1242 arg.unique = req->in.h.unique;
1243
1244 spin_unlock(&fiq->lock);
1245 if (nbytes < reqsize)
1246 return -EINVAL;
1247
1248 err = fuse_copy_one(cs, &ih, sizeof(ih));
1249 if (!err)
1250 err = fuse_copy_one(cs, &arg, sizeof(arg));
1251 fuse_copy_finish(cs);
1252
1253 return err ? err : reqsize;
1254 }
1255
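/*
 * Detach up to @max forget requests from the head of the forget list.  The
 * number actually dequeued is returned in *countp if non-NULL.  Caller must
 * hold fiq->lock.
 */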
1256 static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1257 unsigned int max,
1258 unsigned int *countp)
1259 {
1260 struct fuse_forget_link *head = fiq->forget_list_head.next;
1261 struct fuse_forget_link **newhead = &head;
1262 unsigned count;
1263
1264 for (count = 0; *newhead != NULL && count < max; count++)
1265 newhead = &(*newhead)->next;
1266
1267 fiq->forget_list_head.next = *newhead;
1268 *newhead = NULL;
1269 if (fiq->forget_list_head.next == NULL)
1270 fiq->forget_list_tail = &fiq->forget_list_head;
1271
1272 if (countp != NULL)
1273 *countp = count;
1274
1275 return head;
1276 }
1277
1278 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1279 struct fuse_copy_state *cs,
1280 size_t nbytes)
1281 __releases(fiq->lock)
1282 {
1283 int err;
1284 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
1285 struct fuse_forget_in arg = {
1286 .nlookup = forget->forget_one.nlookup,
1287 };
1288 struct fuse_in_header ih = {
1289 .opcode = FUSE_FORGET,
1290 .nodeid = forget->forget_one.nodeid,
1291 .unique = fuse_get_unique_locked(fiq),
1292 .len = sizeof(ih) + sizeof(arg),
1293 };
1294
1295 spin_unlock(&fiq->lock);
1296 kfree(forget);
1297 if (nbytes < ih.len)
1298 return -EINVAL;
1299
1300 err = fuse_copy_one(cs, &ih, sizeof(ih));
1301 if (!err)
1302 err = fuse_copy_one(cs, &arg, sizeof(arg));
1303 fuse_copy_finish(cs);
1304
1305 if (err)
1306 return err;
1307
1308 return ih.len;
1309 }
1310
1311 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
1312 struct fuse_copy_state *cs, size_t nbytes)
1313 __releases(fiq->lock)
1314 {
1315 int err;
1316 unsigned max_forgets;
1317 unsigned count;
1318 struct fuse_forget_link *head;
1319 struct fuse_batch_forget_in arg = { .count = 0 };
1320 struct fuse_in_header ih = {
1321 .opcode = FUSE_BATCH_FORGET,
1322 .unique = fuse_get_unique_locked(fiq),
1323 .len = sizeof(ih) + sizeof(arg),
1324 };
1325
1326 if (nbytes < ih.len) {
1327 spin_unlock(&fiq->lock);
1328 return -EINVAL;
1329 }
1330
1331 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1332 head = fuse_dequeue_forget(fiq, max_forgets, &count);
1333 spin_unlock(&fiq->lock);
1334
1335 arg.count = count;
1336 ih.len += count * sizeof(struct fuse_forget_one);
1337 err = fuse_copy_one(cs, &ih, sizeof(ih));
1338 if (!err)
1339 err = fuse_copy_one(cs, &arg, sizeof(arg));
1340
1341 while (head) {
1342 struct fuse_forget_link *forget = head;
1343
1344 if (!err) {
1345 err = fuse_copy_one(cs, &forget->forget_one,
1346 sizeof(forget->forget_one));
1347 }
1348 head = forget->next;
1349 kfree(forget);
1350 }
1351
1352 fuse_copy_finish(cs);
1353
1354 if (err)
1355 return err;
1356
1357 return ih.len;
1358 }
1359
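/* Send either a single FORGET or a batched FUSE_BATCH_FORGET, depending on
 * the protocol minor version and how many forgets are queued */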
1360 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1361 struct fuse_copy_state *cs,
1362 size_t nbytes)
1363 __releases(fiq->lock)
1364 {
1365 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1366 return fuse_read_single_forget(fiq, cs, nbytes);
1367 else
1368 return fuse_read_batch_forget(fiq, cs, nbytes);
1369 }
1370
1371 /*
1372 * Read a single request into the userspace filesystem's buffer. This
1373 * function waits until a request is available, then removes it from
1374 * the pending list and copies request data to userspace buffer. If
1375 * no reply is needed (FORGET) or request has been aborted or there
1376 * was an error during the copying then it's finished by calling
1377 * fuse_request_end(). Otherwise add it to the processing list, and set
1378 * the 'sent' flag.
1379 */
1380 static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1381 struct fuse_copy_state *cs, size_t nbytes)
1382 {
1383 ssize_t err;
1384 struct fuse_conn *fc = fud->fc;
1385 struct fuse_iqueue *fiq = &fc->iq;
1386 struct fuse_pqueue *fpq = &fud->pq;
1387 struct fuse_req *req;
1388 struct fuse_args *args;
1389 unsigned reqsize;
1390 unsigned int hash;
1391
1392 /*
1393 * Require sane minimum read buffer - that has capacity for fixed part
1394 * of any request header + negotiated max_write room for data.
1395 *
1396 * Historically libfuse reserves 4K for fixed header room, but e.g.
1397 * GlusterFS reserves only 80 bytes
1398 *
1399 * = `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
1400 *
1401 * which is the absolute minimum any sane filesystem should be using
1402 * for header room.
1403 */
1404 if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
1405 sizeof(struct fuse_in_header) +
1406 sizeof(struct fuse_write_in) +
1407 fc->max_write))
1408 return -EINVAL;
1409
1410 restart:
1411 for (;;) {
1412 spin_lock(&fiq->lock);
1413 if (!fiq->connected || request_pending(fiq))
1414 break;
1415 spin_unlock(&fiq->lock);
1416
1417 if (file->f_flags & O_NONBLOCK)
1418 return -EAGAIN;
1419 err = wait_event_interruptible_exclusive(fiq->waitq,
1420 !fiq->connected || request_pending(fiq));
1421 if (err)
1422 return err;
1423 }
1424
1425 if (!fiq->connected) {
1426 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1427 goto err_unlock;
1428 }
1429
1430 if (!list_empty(&fiq->interrupts)) {
1431 req = list_entry(fiq->interrupts.next, struct fuse_req,
1432 intr_entry);
1433 return fuse_read_interrupt(fiq, cs, nbytes, req);
1434 }
1435
1436 if (forget_pending(fiq)) {
1437 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1438 return fuse_read_forget(fc, fiq, cs, nbytes);
1439
1440 if (fiq->forget_batch <= -8)
1441 fiq->forget_batch = 16;
1442 }
1443
1444 req = list_entry(fiq->pending.next, struct fuse_req, list);
1445 clear_bit(FR_PENDING, &req->flags);
1446 list_del_init(&req->list);
1447 spin_unlock(&fiq->lock);
1448
1449 args = req->args;
1450 reqsize = req->in.h.len;
1451
1452 /* If request is too large, reply with an error and restart the read */
1453 if (nbytes < reqsize) {
1454 req->out.h.error = -EIO;
1455 /* SETXATTR is special, since it may contain too large data */
1456 if (args->opcode == FUSE_SETXATTR)
1457 req->out.h.error = -E2BIG;
1458 fuse_request_end(req);
1459 goto restart;
1460 }
1461 spin_lock(&fpq->lock);
1462 /*
1463 * Must not put request on fpq->io queue after having been shut down by
1464 * fuse_abort_conn()
1465 */
1466 if (!fpq->connected) {
1467 req->out.h.error = err = -ECONNABORTED;
1468 goto out_end;
1469
1470 }
1471 list_add(&req->list, &fpq->io);
1472 spin_unlock(&fpq->lock);
1473 cs->req = req;
1474 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1475 if (!err)
1476 err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
1477 (struct fuse_arg *) args->in_args, 0);
1478 fuse_copy_finish(cs);
1479 spin_lock(&fpq->lock);
1480 clear_bit(FR_LOCKED, &req->flags);
1481 if (!fpq->connected) {
1482 err = fc->aborted ? -ECONNABORTED : -ENODEV;
1483 goto out_end;
1484 }
1485 if (err) {
1486 req->out.h.error = -EIO;
1487 goto out_end;
1488 }
1489 if (!test_bit(FR_ISREPLY, &req->flags)) {
1490 err = reqsize;
1491 goto out_end;
1492 }
1493 hash = fuse_req_hash(req->in.h.unique);
1494 list_move_tail(&req->list, &fpq->processing[hash]);
1495 __fuse_get_request(req);
1496 set_bit(FR_SENT, &req->flags);
1497 spin_unlock(&fpq->lock);
1498 /* matches barrier in request_wait_answer() */
1499 smp_mb__after_atomic();
1500 if (test_bit(FR_INTERRUPTED, &req->flags))
1501 queue_interrupt(req);
1502 fuse_put_request(req);
1503
1504 return reqsize;
1505
1506 out_end:
1507 if (!test_bit(FR_PRIVATE, &req->flags))
1508 list_del_init(&req->list);
1509 spin_unlock(&fpq->lock);
1510 fuse_request_end(req);
1511 return err;
1512
1513 err_unlock:
1514 spin_unlock(&fiq->lock);
1515 return err;
1516 }
1517
1518 static int fuse_dev_open(struct inode *inode, struct file *file)
1519 {
1520 /*
1521 * The fuse device's file's private_data is used to hold
1522 * the fuse_conn(ection) when it is mounted, and is used to
1523 * keep track of whether the file has been mounted already.
1524 */
1525 file->private_data = NULL;
1526 return 0;
1527 }
1528
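/* read(2) on the fuse device: copy the next request into the server's buffer */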
1529 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1530 {
1531 struct fuse_copy_state cs;
1532 struct file *file = iocb->ki_filp;
1533 struct fuse_dev *fud = fuse_get_dev(file);
1534
1535 if (!fud)
1536 return -EPERM;
1537
1538 if (!user_backed_iter(to))
1539 return -EINVAL;
1540
1541 fuse_copy_init(&cs, 1, to);
1542
1543 return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
1544 }
1545
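/*
 * splice(2) from the fuse device: fill pipe buffers with request data,
 * referencing request pages directly where possible.
 */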
1546 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1547 struct pipe_inode_info *pipe,
1548 size_t len, unsigned int flags)
1549 {
1550 int total, ret;
1551 int page_nr = 0;
1552 struct pipe_buffer *bufs;
1553 struct fuse_copy_state cs;
1554 struct fuse_dev *fud = fuse_get_dev(in);
1555
1556 if (!fud)
1557 return -EPERM;
1558
1559 bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
1560 GFP_KERNEL);
1561 if (!bufs)
1562 return -ENOMEM;
1563
1564 fuse_copy_init(&cs, 1, NULL);
1565 cs.pipebufs = bufs;
1566 cs.pipe = pipe;
1567 ret = fuse_dev_do_read(fud, in, &cs, len);
1568 if (ret < 0)
1569 goto out;
1570
1571 if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) {
1572 ret = -EIO;
1573 goto out;
1574 }
1575
1576 for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
1577 /*
1578 * Need to be careful about this. Having buf->ops in module
1579 * code can Oops if the buffer persists after module unload.
1580 */
1581 bufs[page_nr].ops = &nosteal_pipe_buf_ops;
1582 bufs[page_nr].flags = 0;
1583 ret = add_to_pipe(pipe, &bufs[page_nr++]);
1584 if (unlikely(ret < 0))
1585 break;
1586 }
1587 if (total)
1588 ret = total;
1589 out:
1590 for (; page_nr < cs.nr_segs; page_nr++)
1591 put_page(bufs[page_nr].page);
1592
1593 kvfree(bufs);
1594 return ret;
1595 }
1596
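/*
 * Notification handlers: each decodes the payload that the server wrote to
 * the device and forwards it to the corresponding in-kernel helper.
 */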
1597 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1598 struct fuse_copy_state *cs)
1599 {
1600 struct fuse_notify_poll_wakeup_out outarg;
1601 int err = -EINVAL;
1602
1603 if (size != sizeof(outarg))
1604 goto err;
1605
1606 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1607 if (err)
1608 goto err;
1609
1610 fuse_copy_finish(cs);
1611 return fuse_notify_poll_wakeup(fc, &outarg);
1612
1613 err:
1614 fuse_copy_finish(cs);
1615 return err;
1616 }
1617
1618 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1619 struct fuse_copy_state *cs)
1620 {
1621 struct fuse_notify_inval_inode_out outarg;
1622 int err = -EINVAL;
1623
1624 if (size != sizeof(outarg))
1625 goto err;
1626
1627 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1628 if (err)
1629 goto err;
1630 fuse_copy_finish(cs);
1631
1632 down_read(&fc->killsb);
1633 err = fuse_reverse_inval_inode(fc, outarg.ino,
1634 outarg.off, outarg.len);
1635 up_read(&fc->killsb);
1636 return err;
1637
1638 err:
1639 fuse_copy_finish(cs);
1640 return err;
1641 }
1642
1643 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1644 struct fuse_copy_state *cs)
1645 {
1646 struct fuse_notify_inval_entry_out outarg;
1647 int err;
1648 char *buf = NULL;
1649 struct qstr name;
1650
1651 err = -EINVAL;
1652 if (size < sizeof(outarg))
1653 goto err;
1654
1655 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1656 if (err)
1657 goto err;
1658
1659 err = -ENAMETOOLONG;
1660 if (outarg.namelen > fc->name_max)
1661 goto err;
1662
1663 err = -EINVAL;
1664 if (size != sizeof(outarg) + outarg.namelen + 1)
1665 goto err;
1666
1667 err = -ENOMEM;
1668 buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
1669 if (!buf)
1670 goto err;
1671
1672 name.name = buf;
1673 name.len = outarg.namelen;
1674 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1675 if (err)
1676 goto err;
1677 fuse_copy_finish(cs);
1678 buf[outarg.namelen] = 0;
1679
1680 down_read(&fc->killsb);
1681 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
1682 up_read(&fc->killsb);
1683 kfree(buf);
1684 return err;
1685
1686 err:
1687 kfree(buf);
1688 fuse_copy_finish(cs);
1689 return err;
1690 }
1691
1692 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1693 struct fuse_copy_state *cs)
1694 {
1695 struct fuse_notify_delete_out outarg;
1696 int err;
1697 char *buf = NULL;
1698 struct qstr name;
1699
1700 err = -EINVAL;
1701 if (size < sizeof(outarg))
1702 goto err;
1703
1704 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1705 if (err)
1706 goto err;
1707
1708 err = -ENAMETOOLONG;
1709 if (outarg.namelen > fc->name_max)
1710 goto err;
1711
1712 err = -EINVAL;
1713 if (size != sizeof(outarg) + outarg.namelen + 1)
1714 goto err;
1715
1716 err = -ENOMEM;
1717 buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
1718 if (!buf)
1719 goto err;
1720
1721 name.name = buf;
1722 name.len = outarg.namelen;
1723 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1724 if (err)
1725 goto err;
1726 fuse_copy_finish(cs);
1727 buf[outarg.namelen] = 0;
1728
1729 down_read(&fc->killsb);
1730 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
1731 up_read(&fc->killsb);
1732 kfree(buf);
1733 return err;
1734
1735 err:
1736 kfree(buf);
1737 fuse_copy_finish(cs);
1738 return err;
1739 }
1740
1741 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1742 struct fuse_copy_state *cs)
1743 {
1744 struct fuse_notify_store_out outarg;
1745 struct inode *inode;
1746 struct address_space *mapping;
1747 u64 nodeid;
1748 int err;
1749 pgoff_t index;
1750 unsigned int offset;
1751 unsigned int num;
1752 loff_t file_size;
1753 loff_t end;
1754
1755 err = -EINVAL;
1756 if (size < sizeof(outarg))
1757 goto out_finish;
1758
1759 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1760 if (err)
1761 goto out_finish;
1762
1763 err = -EINVAL;
1764 if (size - sizeof(outarg) != outarg.size)
1765 goto out_finish;
1766
1767 nodeid = outarg.nodeid;
1768
1769 down_read(&fc->killsb);
1770
1771 err = -ENOENT;
1772 inode = fuse_ilookup(fc, nodeid, NULL);
1773 if (!inode)
1774 goto out_up_killsb;
1775
1776 mapping = inode->i_mapping;
1777 index = outarg.offset >> PAGE_SHIFT;
1778 offset = outarg.offset & ~PAGE_MASK;
1779 file_size = i_size_read(inode);
1780 end = outarg.offset + outarg.size;
1781 if (end > file_size) {
1782 file_size = end;
1783 fuse_write_update_attr(inode, file_size, outarg.size);
1784 }
1785
1786 num = outarg.size;
1787 while (num) {
1788 struct folio *folio;
1789 struct page *page;
1790 unsigned int this_num;
1791
1792 folio = filemap_grab_folio(mapping, index);
1793 err = PTR_ERR(folio);
1794 if (IS_ERR(folio))
1795 goto out_iput;
1796
1797 page = &folio->page;
1798 this_num = min_t(unsigned, num, folio_size(folio) - offset);
1799 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1800 if (!folio_test_uptodate(folio) && !err && offset == 0 &&
1801 (this_num == folio_size(folio) || file_size == end)) {
1802 folio_zero_segment(folio, this_num, folio_size(folio));
1803 folio_mark_uptodate(folio);
1804 }
1805 folio_unlock(folio);
1806 folio_put(folio);
1807
1808 if (err)
1809 goto out_iput;
1810
1811 num -= this_num;
1812 offset = 0;
1813 index++;
1814 }
1815
1816 err = 0;
1817
1818 out_iput:
1819 iput(inode);
1820 out_up_killsb:
1821 up_read(&fc->killsb);
1822 out_finish:
1823 fuse_copy_finish(cs);
1824 return err;
1825 }
1826
1827 struct fuse_retrieve_args {
1828 struct fuse_args_pages ap;
1829 struct fuse_notify_retrieve_in inarg;
1830 };
1831
1832 static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
1833 int error)
1834 {
1835 struct fuse_retrieve_args *ra =
1836 container_of(args, typeof(*ra), ap.args);
1837
1838 release_pages(ra->ap.folios, ra->ap.num_folios);
1839 kfree(ra);
1840 }
1841
1842 static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
1843 struct fuse_notify_retrieve_out *outarg)
1844 {
1845 int err;
1846 struct address_space *mapping = inode->i_mapping;
1847 pgoff_t index;
1848 loff_t file_size;
1849 unsigned int num;
1850 unsigned int offset;
1851 size_t total_len = 0;
1852 unsigned int num_pages, cur_pages = 0;
1853 struct fuse_conn *fc = fm->fc;
1854 struct fuse_retrieve_args *ra;
1855 size_t args_size = sizeof(*ra);
1856 struct fuse_args_pages *ap;
1857 struct fuse_args *args;
1858
1859 offset = outarg->offset & ~PAGE_MASK;
1860 file_size = i_size_read(inode);
1861
1862 num = min(outarg->size, fc->max_write);
1863 if (outarg->offset > file_size)
1864 num = 0;
1865 else if (outarg->offset + num > file_size)
1866 num = file_size - outarg->offset;
1867
1868 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1869 num_pages = min(num_pages, fc->max_pages);
1870
1871 args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0]));
1872
1873 ra = kzalloc(args_size, GFP_KERNEL);
1874 if (!ra)
1875 return -ENOMEM;
1876
1877 ap = &ra->ap;
1878 ap->folios = (void *) (ra + 1);
1879 ap->descs = (void *) (ap->folios + num_pages);
1880
1881 args = &ap->args;
1882 args->nodeid = outarg->nodeid;
1883 args->opcode = FUSE_NOTIFY_REPLY;
1884 args->in_numargs = 3;
1885 args->in_pages = true;
1886 args->end = fuse_retrieve_end;
1887
1888 index = outarg->offset >> PAGE_SHIFT;
1889
1890 while (num && cur_pages < num_pages) {
1891 struct folio *folio;
1892 unsigned int this_num;
1893
1894 folio = filemap_get_folio(mapping, index);
1895 if (IS_ERR(folio))
1896 break;
1897
1898 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1899 ap->folios[ap->num_folios] = folio;
1900 ap->descs[ap->num_folios].offset = offset;
1901 ap->descs[ap->num_folios].length = this_num;
1902 ap->num_folios++;
1903 cur_pages++;
1904
1905 offset = 0;
1906 num -= this_num;
1907 total_len += this_num;
1908 index++;
1909 }
1910 ra->inarg.offset = outarg->offset;
1911 ra->inarg.size = total_len;
1912 fuse_set_zero_arg0(args);
1913 args->in_args[1].size = sizeof(ra->inarg);
1914 args->in_args[1].value = &ra->inarg;
1915 args->in_args[2].size = total_len;
1916
1917 err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
1918 if (err)
1919 fuse_retrieve_end(fm, args, err);
1920
1921 return err;
1922 }
1923
1924 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1925 struct fuse_copy_state *cs)
1926 {
1927 struct fuse_notify_retrieve_out outarg;
1928 struct fuse_mount *fm;
1929 struct inode *inode;
1930 u64 nodeid;
1931 int err;
1932
1933 err = -EINVAL;
1934 if (size != sizeof(outarg))
1935 goto copy_finish;
1936
1937 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1938 if (err)
1939 goto copy_finish;
1940
1941 fuse_copy_finish(cs);
1942
1943 down_read(&fc->killsb);
1944 err = -ENOENT;
1945 nodeid = outarg.nodeid;
1946
1947 inode = fuse_ilookup(fc, nodeid, &fm);
1948 if (inode) {
1949 err = fuse_retrieve(fm, inode, &outarg);
1950 iput(inode);
1951 }
1952 up_read(&fc->killsb);
1953
1954 return err;
1955
1956 copy_finish:
1957 fuse_copy_finish(cs);
1958 return err;
1959 }
1960
1961 /*
1962 * Resend all requests on the processing queues.
1963 *
1964 * When a FUSE daemon panics and fails over, it is possible for some inflight
1965 * requests to be lost and never returned. As a result, applications awaiting
1966 * replies would become stuck forever. To address this, we can use notification
1967 * to trigger resending of these pending requests to the FUSE daemon, ensuring
1968 * they are properly processed again.
1969 *
1970 * Please note that this strategy is applicable only to idempotent requests or
1971 * if the FUSE daemon takes careful measures to avoid processing duplicated
1972 * non-idempotent requests.
1973 */
1974 static void fuse_resend(struct fuse_conn *fc)
1975 {
1976 struct fuse_dev *fud;
1977 struct fuse_req *req, *next;
1978 struct fuse_iqueue *fiq = &fc->iq;
1979 LIST_HEAD(to_queue);
1980 unsigned int i;
1981
1982 spin_lock(&fc->lock);
1983 if (!fc->connected) {
1984 spin_unlock(&fc->lock);
1985 return;
1986 }
1987
1988 list_for_each_entry(fud, &fc->devices, entry) {
1989 struct fuse_pqueue *fpq = &fud->pq;
1990
1991 spin_lock(&fpq->lock);
1992 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
1993 list_splice_tail_init(&fpq->processing[i], &to_queue);
1994 spin_unlock(&fpq->lock);
1995 }
1996 spin_unlock(&fc->lock);
1997
1998 list_for_each_entry_safe(req, next, &to_queue, list) {
1999 set_bit(FR_PENDING, &req->flags);
2000 clear_bit(FR_SENT, &req->flags);
2001 		/* mark the request as a resend request */
2002 req->in.h.unique |= FUSE_UNIQUE_RESEND;
2003 }
2004
2005 spin_lock(&fiq->lock);
2006 if (!fiq->connected) {
2007 spin_unlock(&fiq->lock);
2008 list_for_each_entry(req, &to_queue, list)
2009 clear_bit(FR_PENDING, &req->flags);
2010 fuse_dev_end_requests(&to_queue);
2011 return;
2012 }
2013 /* iq and pq requests are both oldest to newest */
2014 list_splice(&to_queue, &fiq->pending);
2015 fuse_dev_wake_and_unlock(fiq);
2016 }
2017
2018 static int fuse_notify_resend(struct fuse_conn *fc)
2019 {
2020 fuse_resend(fc);
2021 return 0;
2022 }
2023
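/*
 * Dispatch an unsolicited notification from the daemon.  'code' arrived in
 * the error field of the written header and 'size' is the payload length
 * remaining after the header.
 */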
2024 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
2025 unsigned int size, struct fuse_copy_state *cs)
2026 {
2027 /* Don't try to move pages (yet) */
2028 cs->move_pages = 0;
2029
2030 switch (code) {
2031 case FUSE_NOTIFY_POLL:
2032 return fuse_notify_poll(fc, size, cs);
2033
2034 case FUSE_NOTIFY_INVAL_INODE:
2035 return fuse_notify_inval_inode(fc, size, cs);
2036
2037 case FUSE_NOTIFY_INVAL_ENTRY:
2038 return fuse_notify_inval_entry(fc, size, cs);
2039
2040 case FUSE_NOTIFY_STORE:
2041 return fuse_notify_store(fc, size, cs);
2042
2043 case FUSE_NOTIFY_RETRIEVE:
2044 return fuse_notify_retrieve(fc, size, cs);
2045
2046 case FUSE_NOTIFY_DELETE:
2047 return fuse_notify_delete(fc, size, cs);
2048
2049 case FUSE_NOTIFY_RESEND:
2050 return fuse_notify_resend(fc);
2051
2052 default:
2053 fuse_copy_finish(cs);
2054 return -EINVAL;
2055 }
2056 }
2057
2058 /* Look up request on processing list by unique ID */
2059 struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique)
2060 {
2061 unsigned int hash = fuse_req_hash(unique);
2062 struct fuse_req *req;
2063
2064 list_for_each_entry(req, &fpq->processing[hash], list) {
2065 if (req->in.h.unique == unique)
2066 return req;
2067 }
2068 return NULL;
2069 }
2070
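/*
 * Copy the output arguments of a reply out of the copy state.  'nbytes'
 * covers the fuse_out_header plus the payload (payload only for io-uring).
 * A short reply is accepted only when the last argument is variable sized
 * (out_argvar), in which case it is shrunk to fit.
 */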
2071 int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
2072 unsigned nbytes)
2073 {
2074
2075 unsigned int reqsize = 0;
2076
2077 /*
2078 	 * io-uring keeps the headers separate from the args; args is payload only
2079 */
2080 if (!cs->is_uring)
2081 reqsize = sizeof(struct fuse_out_header);
2082
2083 reqsize += fuse_len_args(args->out_numargs, args->out_args);
2084
2085 if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
2086 return -EINVAL;
2087 else if (reqsize > nbytes) {
2088 struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
2089 unsigned diffsize = reqsize - nbytes;
2090
2091 if (diffsize > lastarg->size)
2092 return -EINVAL;
2093 lastarg->size -= diffsize;
2094 }
2095 return fuse_copy_args(cs, args->out_numargs, args->out_pages,
2096 args->out_args, args->page_zeroing);
2097 }
2098
2099 /*
2100  * Write a single reply to a request.  First the header is copied from
2101  * the write buffer.  The matching request is then looked up on the
2102  * processing list by the unique ID found in the header.  If found, it is
2103  * moved off the list and the rest of the buffer is copied into it.
2104  * The request is finished by calling fuse_request_end().
2105 */
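/*
 * For reference, a reply as written by the daemon is laid out as a
 * struct fuse_out_header (len, error, unique) immediately followed by the
 * opcode-specific output arguments, with 'len' equal to the total number
 * of bytes written.
 */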
2106 static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
2107 struct fuse_copy_state *cs, size_t nbytes)
2108 {
2109 int err;
2110 struct fuse_conn *fc = fud->fc;
2111 struct fuse_pqueue *fpq = &fud->pq;
2112 struct fuse_req *req;
2113 struct fuse_out_header oh;
2114
2115 err = -EINVAL;
2116 if (nbytes < sizeof(struct fuse_out_header))
2117 goto out;
2118
2119 err = fuse_copy_one(cs, &oh, sizeof(oh));
2120 if (err)
2121 goto copy_finish;
2122
2123 err = -EINVAL;
2124 if (oh.len != nbytes)
2125 goto copy_finish;
2126
2127 /*
2128 	 * A zero oh.unique indicates an unsolicited notification message;
2129 	 * oh.error then contains the notification code.
2130 */
2131 if (!oh.unique) {
2132 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
2133 goto out;
2134 }
2135
2136 err = -EINVAL;
2137 if (oh.error <= -512 || oh.error > 0)
2138 goto copy_finish;
2139
2140 spin_lock(&fpq->lock);
2141 req = NULL;
2142 if (fpq->connected)
2143 req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
2144
2145 err = -ENOENT;
2146 if (!req) {
2147 spin_unlock(&fpq->lock);
2148 goto copy_finish;
2149 }
2150
2151 /* Is it an interrupt reply ID? */
2152 if (oh.unique & FUSE_INT_REQ_BIT) {
2153 __fuse_get_request(req);
2154 spin_unlock(&fpq->lock);
2155
2156 err = 0;
2157 if (nbytes != sizeof(struct fuse_out_header))
2158 err = -EINVAL;
2159 else if (oh.error == -ENOSYS)
2160 fc->no_interrupt = 1;
2161 else if (oh.error == -EAGAIN)
2162 err = queue_interrupt(req);
2163
2164 fuse_put_request(req);
2165
2166 goto copy_finish;
2167 }
2168
2169 clear_bit(FR_SENT, &req->flags);
2170 list_move(&req->list, &fpq->io);
2171 req->out.h = oh;
2172 set_bit(FR_LOCKED, &req->flags);
2173 spin_unlock(&fpq->lock);
2174 cs->req = req;
2175 if (!req->args->page_replace)
2176 cs->move_pages = 0;
2177
2178 if (oh.error)
2179 err = nbytes != sizeof(oh) ? -EINVAL : 0;
2180 else
2181 err = fuse_copy_out_args(cs, req->args, nbytes);
2182 fuse_copy_finish(cs);
2183
2184 spin_lock(&fpq->lock);
2185 clear_bit(FR_LOCKED, &req->flags);
2186 if (!fpq->connected)
2187 err = -ENOENT;
2188 else if (err)
2189 req->out.h.error = -EIO;
2190 if (!test_bit(FR_PRIVATE, &req->flags))
2191 list_del_init(&req->list);
2192 spin_unlock(&fpq->lock);
2193
2194 fuse_request_end(req);
2195 out:
2196 return err ? err : nbytes;
2197
2198 copy_finish:
2199 fuse_copy_finish(cs);
2200 goto out;
2201 }
2202
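/*
 * write(2)/writev(2) entry point for the device: replies and notifications
 * from the daemon arrive here; the iterator must be user backed.
 */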
2203 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
2204 {
2205 struct fuse_copy_state cs;
2206 struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
2207
2208 if (!fud)
2209 return -EPERM;
2210
2211 if (!user_backed_iter(from))
2212 return -EINVAL;
2213
2214 fuse_copy_init(&cs, 0, from);
2215
2216 return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
2217 }
2218
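/*
 * Splice a reply from a pipe into the device.  The needed pipe buffers are
 * detached (or referenced) under the pipe lock, then consumed without it;
 * with SPLICE_F_MOVE whole pages may be moved into place instead of copied
 * (cs.move_pages).
 */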
2219 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2220 struct file *out, loff_t *ppos,
2221 size_t len, unsigned int flags)
2222 {
2223 unsigned int head, tail, count;
2224 unsigned nbuf;
2225 unsigned idx;
2226 struct pipe_buffer *bufs;
2227 struct fuse_copy_state cs;
2228 struct fuse_dev *fud;
2229 size_t rem;
2230 ssize_t ret;
2231
2232 fud = fuse_get_dev(out);
2233 if (!fud)
2234 return -EPERM;
2235
2236 pipe_lock(pipe);
2237
2238 head = pipe->head;
2239 tail = pipe->tail;
2240 count = pipe_occupancy(head, tail);
2241
2242 bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
2243 if (!bufs) {
2244 pipe_unlock(pipe);
2245 return -ENOMEM;
2246 }
2247
2248 nbuf = 0;
2249 rem = 0;
2250 for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++)
2251 rem += pipe_buf(pipe, idx)->len;
2252
2253 ret = -EINVAL;
2254 if (rem < len)
2255 goto out_free;
2256
2257 rem = len;
2258 while (rem) {
2259 struct pipe_buffer *ibuf;
2260 struct pipe_buffer *obuf;
2261
2262 if (WARN_ON(nbuf >= count || pipe_empty(head, tail)))
2263 goto out_free;
2264
2265 ibuf = pipe_buf(pipe, tail);
2266 obuf = &bufs[nbuf];
2267
2268 if (rem >= ibuf->len) {
2269 *obuf = *ibuf;
2270 ibuf->ops = NULL;
2271 tail++;
2272 pipe->tail = tail;
2273 } else {
2274 if (!pipe_buf_get(pipe, ibuf))
2275 goto out_free;
2276
2277 *obuf = *ibuf;
2278 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2279 obuf->len = rem;
2280 ibuf->offset += obuf->len;
2281 ibuf->len -= obuf->len;
2282 }
2283 nbuf++;
2284 rem -= obuf->len;
2285 }
2286 pipe_unlock(pipe);
2287
2288 fuse_copy_init(&cs, 0, NULL);
2289 cs.pipebufs = bufs;
2290 cs.nr_segs = nbuf;
2291 cs.pipe = pipe;
2292
2293 if (flags & SPLICE_F_MOVE)
2294 cs.move_pages = 1;
2295
2296 ret = fuse_dev_do_write(fud, &cs, len);
2297
2298 pipe_lock(pipe);
2299 out_free:
2300 for (idx = 0; idx < nbuf; idx++) {
2301 struct pipe_buffer *buf = &bufs[idx];
2302
2303 if (buf->ops)
2304 pipe_buf_release(pipe, buf);
2305 }
2306 pipe_unlock(pipe);
2307
2308 kvfree(bufs);
2309 return ret;
2310 }
2311
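/*
 * The device is always writable.  It becomes readable when requests are
 * pending on the input queue and reports EPOLLERR once the connection is
 * gone.
 */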
2312 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
2313 {
2314 __poll_t mask = EPOLLOUT | EPOLLWRNORM;
2315 struct fuse_iqueue *fiq;
2316 struct fuse_dev *fud = fuse_get_dev(file);
2317
2318 if (!fud)
2319 return EPOLLERR;
2320
2321 fiq = &fud->fc->iq;
2322 poll_wait(file, &fiq->waitq, wait);
2323
2324 spin_lock(&fiq->lock);
2325 if (!fiq->connected)
2326 mask = EPOLLERR;
2327 else if (request_pending(fiq))
2328 mask |= EPOLLIN | EPOLLRDNORM;
2329 spin_unlock(&fiq->lock);
2330
2331 return mask;
2332 }
2333
2334 /* Abort all requests on the given list (pending or processing) */
2335 void fuse_dev_end_requests(struct list_head *head)
2336 {
2337 while (!list_empty(head)) {
2338 struct fuse_req *req;
2339 req = list_entry(head->next, struct fuse_req, list);
2340 req->out.h.error = -ECONNABORTED;
2341 clear_bit(FR_SENT, &req->flags);
2342 list_del_init(&req->list);
2343 fuse_request_end(req);
2344 }
2345 }
2346
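/* Wake up everyone polling on this connection's files (used on abort) */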
2347 static void end_polls(struct fuse_conn *fc)
2348 {
2349 struct rb_node *p;
2350
2351 p = rb_first(&fc->polled_files);
2352
2353 while (p) {
2354 struct fuse_file *ff;
2355 ff = rb_entry(p, struct fuse_file, polled_node);
2356 wake_up_interruptible_all(&ff->poll_wait);
2357
2358 p = rb_next(p);
2359 }
2360 }
2361
2362 /*
2363 * Abort all requests.
2364 *
2365 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2366 * filesystem.
2367 *
2368 * The same effect is usually achievable through killing the filesystem daemon
2369 * and all users of the filesystem. The exception is the combination of an
2370 * asynchronous request and the tricky deadlock (see
2371 * Documentation/filesystems/fuse.rst).
2372 *
2373  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2374  * requests; they should be finished off immediately.  Locked requests will be
2375  * finished after unlock; see unlock_request().  2: Finish off the unlocked
2376  * requests.  It is possible that some request will finish before we can.  This
2377  * is OK; the request will in that case be removed from the list before we
2378  * touch it.
2379 */
2380 void fuse_abort_conn(struct fuse_conn *fc)
2381 {
2382 struct fuse_iqueue *fiq = &fc->iq;
2383
2384 spin_lock(&fc->lock);
2385 if (fc->connected) {
2386 struct fuse_dev *fud;
2387 struct fuse_req *req, *next;
2388 LIST_HEAD(to_end);
2389 unsigned int i;
2390
2391 if (fc->timeout.req_timeout)
2392 cancel_delayed_work(&fc->timeout.work);
2393
2394 /* Background queuing checks fc->connected under bg_lock */
2395 spin_lock(&fc->bg_lock);
2396 fc->connected = 0;
2397 spin_unlock(&fc->bg_lock);
2398
2399 fuse_set_initialized(fc);
2400 list_for_each_entry(fud, &fc->devices, entry) {
2401 struct fuse_pqueue *fpq = &fud->pq;
2402
2403 spin_lock(&fpq->lock);
2404 fpq->connected = 0;
2405 list_for_each_entry_safe(req, next, &fpq->io, list) {
2406 req->out.h.error = -ECONNABORTED;
2407 spin_lock(&req->waitq.lock);
2408 set_bit(FR_ABORTED, &req->flags);
2409 if (!test_bit(FR_LOCKED, &req->flags)) {
2410 set_bit(FR_PRIVATE, &req->flags);
2411 __fuse_get_request(req);
2412 list_move(&req->list, &to_end);
2413 }
2414 spin_unlock(&req->waitq.lock);
2415 }
2416 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2417 list_splice_tail_init(&fpq->processing[i],
2418 &to_end);
2419 spin_unlock(&fpq->lock);
2420 }
2421 spin_lock(&fc->bg_lock);
2422 fc->blocked = 0;
2423 fc->max_background = UINT_MAX;
2424 flush_bg_queue(fc);
2425 spin_unlock(&fc->bg_lock);
2426
2427 spin_lock(&fiq->lock);
2428 fiq->connected = 0;
2429 list_for_each_entry(req, &fiq->pending, list)
2430 clear_bit(FR_PENDING, &req->flags);
2431 list_splice_tail_init(&fiq->pending, &to_end);
2432 while (forget_pending(fiq))
2433 kfree(fuse_dequeue_forget(fiq, 1, NULL));
2434 wake_up_all(&fiq->waitq);
2435 spin_unlock(&fiq->lock);
2436 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2437 end_polls(fc);
2438 wake_up_all(&fc->blocked_waitq);
2439 spin_unlock(&fc->lock);
2440
2441 fuse_dev_end_requests(&to_end);
2442
2443 /*
2444 		 * fc->lock must not be held here, to avoid conflicts with the
2445 		 * io-uring locks
2446 */
2447 fuse_uring_abort(fc);
2448 } else {
2449 spin_unlock(&fc->lock);
2450 }
2451 }
2452 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2453
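/*
 * After an abort, wait for all requests accounted in num_waiting to drain
 * and for the io-uring queues to be stopped.
 */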
2454 void fuse_wait_aborted(struct fuse_conn *fc)
2455 {
2456 /* matches implicit memory barrier in fuse_drop_waiting() */
2457 smp_mb();
2458 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2459
2460 fuse_uring_wait_stopped_queues(fc);
2461 }
2462
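/*
 * Release a /dev/fuse file: finish off everything still on this device's
 * processing queues; closing the last device aborts the connection.
 */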
2463 int fuse_dev_release(struct inode *inode, struct file *file)
2464 {
2465 struct fuse_dev *fud = fuse_get_dev(file);
2466
2467 if (fud) {
2468 struct fuse_conn *fc = fud->fc;
2469 struct fuse_pqueue *fpq = &fud->pq;
2470 LIST_HEAD(to_end);
2471 unsigned int i;
2472
2473 spin_lock(&fpq->lock);
2474 WARN_ON(!list_empty(&fpq->io));
2475 for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
2476 list_splice_init(&fpq->processing[i], &to_end);
2477 spin_unlock(&fpq->lock);
2478
2479 fuse_dev_end_requests(&to_end);
2480
2481 /* Are we the last open device? */
2482 if (atomic_dec_and_test(&fc->dev_count)) {
2483 WARN_ON(fc->iq.fasync != NULL);
2484 fuse_abort_conn(fc);
2485 }
2486 fuse_dev_free(fud);
2487 }
2488 return 0;
2489 }
2490 EXPORT_SYMBOL_GPL(fuse_dev_release);
2491
2492 static int fuse_dev_fasync(int fd, struct file *file, int on)
2493 {
2494 struct fuse_dev *fud = fuse_get_dev(file);
2495
2496 if (!fud)
2497 return -EPERM;
2498
2499 /* No locking - fasync_helper does its own locking */
2500 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2501 }
2502
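/*
 * Install a new fuse_dev on 'new' so that another file descriptor can
 * service requests for the same connection.
 */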
2503 static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2504 {
2505 struct fuse_dev *fud;
2506
2507 if (new->private_data)
2508 return -EINVAL;
2509
2510 fud = fuse_dev_alloc_install(fc);
2511 if (!fud)
2512 return -ENOMEM;
2513
2514 new->private_data = fud;
2515 atomic_inc(&fc->dev_count);
2516
2517 return 0;
2518 }
2519
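/*
 * FUSE_DEV_IOC_CLONE: the argument is the fd of an already set up fuse
 * device; clone its connection onto this file.  Roughly, from the daemon
 * side:
 *
 *	int clonefd = open("/dev/fuse", O_RDWR);
 *	ioctl(clonefd, FUSE_DEV_IOC_CLONE, &session_fd);
 */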
2520 static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
2521 {
2522 int res;
2523 int oldfd;
2524 struct fuse_dev *fud = NULL;
2525
2526 if (get_user(oldfd, argp))
2527 return -EFAULT;
2528
2529 CLASS(fd, f)(oldfd);
2530 if (fd_empty(f))
2531 return -EINVAL;
2532
2533 /*
2534 * Check against file->f_op because CUSE
2535 * uses the same ioctl handler.
2536 */
2537 if (fd_file(f)->f_op == file->f_op)
2538 fud = fuse_get_dev(fd_file(f));
2539
2540 res = -EINVAL;
2541 if (fud) {
2542 mutex_lock(&fuse_mutex);
2543 res = fuse_device_clone(fud->fc, file);
2544 mutex_unlock(&fuse_mutex);
2545 }
2546
2547 return res;
2548 }
2549
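/*
 * FUSE_DEV_IOC_BACKING_OPEN (passthrough only): copy a fuse_backing_map
 * from userspace and register the described backing file with the
 * connection.
 */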
2550 static long fuse_dev_ioctl_backing_open(struct file *file,
2551 struct fuse_backing_map __user *argp)
2552 {
2553 struct fuse_dev *fud = fuse_get_dev(file);
2554 struct fuse_backing_map map;
2555
2556 if (!fud)
2557 return -EPERM;
2558
2559 if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
2560 return -EOPNOTSUPP;
2561
2562 if (copy_from_user(&map, argp, sizeof(map)))
2563 return -EFAULT;
2564
2565 return fuse_backing_open(fud->fc, &map);
2566 }
2567
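/*
 * FUSE_DEV_IOC_BACKING_CLOSE (passthrough only): unregister the backing
 * file identified by the id passed from userspace.
 */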
2568 static long fuse_dev_ioctl_backing_close(struct file *file, __u32 __user *argp)
2569 {
2570 struct fuse_dev *fud = fuse_get_dev(file);
2571 int backing_id;
2572
2573 if (!fud)
2574 return -EPERM;
2575
2576 if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
2577 return -EOPNOTSUPP;
2578
2579 if (get_user(backing_id, argp))
2580 return -EFAULT;
2581
2582 return fuse_backing_close(fud->fc, backing_id);
2583 }
2584
2585 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2586 unsigned long arg)
2587 {
2588 void __user *argp = (void __user *)arg;
2589
2590 switch (cmd) {
2591 case FUSE_DEV_IOC_CLONE:
2592 return fuse_dev_ioctl_clone(file, argp);
2593
2594 case FUSE_DEV_IOC_BACKING_OPEN:
2595 return fuse_dev_ioctl_backing_open(file, argp);
2596
2597 case FUSE_DEV_IOC_BACKING_CLOSE:
2598 return fuse_dev_ioctl_backing_close(file, argp);
2599
2600 default:
2601 return -ENOTTY;
2602 }
2603 }
2604
2605 const struct file_operations fuse_dev_operations = {
2606 .owner = THIS_MODULE,
2607 .open = fuse_dev_open,
2608 .read_iter = fuse_dev_read,
2609 .splice_read = fuse_dev_splice_read,
2610 .write_iter = fuse_dev_write,
2611 .splice_write = fuse_dev_splice_write,
2612 .poll = fuse_dev_poll,
2613 .release = fuse_dev_release,
2614 .fasync = fuse_dev_fasync,
2615 .unlocked_ioctl = fuse_dev_ioctl,
2616 .compat_ioctl = compat_ptr_ioctl,
2617 #ifdef CONFIG_FUSE_IO_URING
2618 .uring_cmd = fuse_uring_cmd,
2619 #endif
2620 };
2621 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2622
2623 static struct miscdevice fuse_miscdevice = {
2624 .minor = FUSE_MINOR,
2625 .name = "fuse",
2626 .fops = &fuse_dev_operations,
2627 };
2628
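/* Create the request slab cache and register the /dev/fuse misc device */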
2629 int __init fuse_dev_init(void)
2630 {
2631 int err = -ENOMEM;
2632 fuse_req_cachep = kmem_cache_create("fuse_request",
2633 sizeof(struct fuse_req),
2634 0, 0, NULL);
2635 if (!fuse_req_cachep)
2636 goto out;
2637
2638 err = misc_register(&fuse_miscdevice);
2639 if (err)
2640 goto out_cache_clean;
2641
2642 return 0;
2643
2644 out_cache_clean:
2645 kmem_cache_destroy(fuse_req_cachep);
2646 out:
2647 return err;
2648 }
2649
2650 void fuse_dev_cleanup(void)
2651 {
2652 misc_deregister(&fuse_miscdevice);
2653 kmem_cache_destroy(fuse_req_cachep);
2654 }
2655