xref: /linux/fs/fuse/dev.c (revision 843aef4930b9953c9ca624a990b201440304b56f)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
4 
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8 
9 #include "fuse_i.h"
10 
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
19 
20 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
21 
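/* Slab cache from which all fuse_req structures are allocated */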
22 static struct kmem_cache *fuse_req_cachep;
23 
24 static struct fuse_conn *fuse_get_conn(struct file *file)
25 {
26 	/*
27 	 * Lockless access is OK, because file->private_data is set
28 	 * once during mount and is valid until the file is released.
29 	 */
30 	return file->private_data;
31 }
32 
33 static void fuse_request_init(struct fuse_req *req)
34 {
35 	memset(req, 0, sizeof(*req));
36 	INIT_LIST_HEAD(&req->list);
37 	INIT_LIST_HEAD(&req->intr_entry);
38 	init_waitqueue_head(&req->waitq);
39 	atomic_set(&req->count, 1);
40 }
41 
42 struct fuse_req *fuse_request_alloc(void)
43 {
44 	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
45 	if (req)
46 		fuse_request_init(req);
47 	return req;
48 }
49 
50 struct fuse_req *fuse_request_alloc_nofs(void)
51 {
52 	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
53 	if (req)
54 		fuse_request_init(req);
55 	return req;
56 }
57 
58 void fuse_request_free(struct fuse_req *req)
59 {
60 	kmem_cache_free(fuse_req_cachep, req);
61 }
62 
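/*
 * Block all signals except SIGKILL, so that only a fatal signal can
 * interrupt the following wait.
 */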
63 static void block_sigs(sigset_t *oldset)
64 {
65 	sigset_t mask;
66 
67 	siginitsetinv(&mask, sigmask(SIGKILL));
68 	sigprocmask(SIG_BLOCK, &mask, oldset);
69 }
70 
71 static void restore_sigs(sigset_t *oldset)
72 {
73 	sigprocmask(SIG_SETMASK, oldset, NULL);
74 }
75 
76 static void __fuse_get_request(struct fuse_req *req)
77 {
78 	atomic_inc(&req->count);
79 }
80 
81 /* Must be called with > 1 refcount */
82 static void __fuse_put_request(struct fuse_req *req)
83 {
84 	BUG_ON(atomic_read(&req->count) < 2);
85 	atomic_dec(&req->count);
86 }
87 
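/* Fill in the caller's credentials and pid in the request header */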
88 static void fuse_req_init_context(struct fuse_req *req)
89 {
90 	req->in.h.uid = current_fsuid();
91 	req->in.h.gid = current_fsgid();
92 	req->in.h.pid = current->pid;
93 }
94 
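/*
 * Allocate a request for sending to userspace.  Waits until the
 * connection is no longer blocked; fails with -EINTR if interrupted
 * by a fatal signal, -ENOTCONN if the connection is gone, or -ENOMEM
 * if the allocation fails.
 */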
95 struct fuse_req *fuse_get_req(struct fuse_conn *fc)
96 {
97 	struct fuse_req *req;
98 	sigset_t oldset;
99 	int intr;
100 	int err;
101 
102 	atomic_inc(&fc->num_waiting);
103 	block_sigs(&oldset);
104 	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
105 	restore_sigs(&oldset);
106 	err = -EINTR;
107 	if (intr)
108 		goto out;
109 
110 	err = -ENOTCONN;
111 	if (!fc->connected)
112 		goto out;
113 
114 	req = fuse_request_alloc();
115 	err = -ENOMEM;
116 	if (!req)
117 		goto out;
118 
119 	fuse_req_init_context(req);
120 	req->waiting = 1;
121 	return req;
122 
123  out:
124 	atomic_dec(&fc->num_waiting);
125 	return ERR_PTR(err);
126 }
127 
128 /*
129  * Return request in fuse_file->reserved_req.  However, that may
130  * currently be in use.  If that is the case, wait for it to become
131  * available.
132  */
133 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
134 					 struct file *file)
135 {
136 	struct fuse_req *req = NULL;
137 	struct fuse_file *ff = file->private_data;
138 
139 	do {
140 		wait_event(fc->reserved_req_waitq, ff->reserved_req);
141 		spin_lock(&fc->lock);
142 		if (ff->reserved_req) {
143 			req = ff->reserved_req;
144 			ff->reserved_req = NULL;
145 			get_file(file);
146 			req->stolen_file = file;
147 		}
148 		spin_unlock(&fc->lock);
149 	} while (!req);
150 
151 	return req;
152 }
153 
154 /*
155  * Put stolen request back into fuse_file->reserved_req
156  */
157 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
158 {
159 	struct file *file = req->stolen_file;
160 	struct fuse_file *ff = file->private_data;
161 
162 	spin_lock(&fc->lock);
163 	fuse_request_init(req);
164 	BUG_ON(ff->reserved_req);
165 	ff->reserved_req = req;
166 	wake_up_all(&fc->reserved_req_waitq);
167 	spin_unlock(&fc->lock);
168 	fput(file);
169 }
170 
171 /*
172  * Gets a request for a file operation, always succeeds
173  *
174  * This is used for sending the FLUSH request, which must get to
175  * userspace, due to POSIX locks which may need to be unlocked.
176  *
177  * If allocation fails due to OOM, use the reserved request in
178  * fuse_file.
179  *
180  * This is very unlikely to deadlock accidentally, since the
181  * filesystem should not have its own file open.  If deadlock is
182  * intentional, it can still be broken by "aborting" the filesystem.
183  */
184 struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
185 {
186 	struct fuse_req *req;
187 
188 	atomic_inc(&fc->num_waiting);
189 	wait_event(fc->blocked_waitq, !fc->blocked);
190 	req = fuse_request_alloc();
191 	if (!req)
192 		req = get_reserved_req(fc, file);
193 
194 	fuse_req_init_context(req);
195 	req->waiting = 1;
196 	return req;
197 }
198 
199 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
200 {
201 	if (atomic_dec_and_test(&req->count)) {
202 		if (req->waiting)
203 			atomic_dec(&fc->num_waiting);
204 
205 		if (req->stolen_file)
206 			put_reserved_req(fc, req);
207 		else
208 			fuse_request_free(req);
209 	}
210 }
211 
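/* Total number of bytes occupied by the given arguments */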
212 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
213 {
214 	unsigned nbytes = 0;
215 	unsigned i;
216 
217 	for (i = 0; i < numargs; i++)
218 		nbytes += args[i].size;
219 
220 	return nbytes;
221 }
222 
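/* Allocate the next unique request ID; zero is reserved and skipped */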
223 static u64 fuse_get_unique(struct fuse_conn *fc)
224 {
225 	fc->reqctr++;
226 	/* zero is special */
227 	if (fc->reqctr == 0)
228 		fc->reqctr = 1;
229 
230 	return fc->reqctr;
231 }
232 
233 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
234 {
235 	req->in.h.unique = fuse_get_unique(fc);
236 	req->in.h.len = sizeof(struct fuse_in_header) +
237 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
238 	list_add_tail(&req->list, &fc->pending);
239 	req->state = FUSE_REQ_PENDING;
240 	if (!req->waiting) {
241 		req->waiting = 1;
242 		atomic_inc(&fc->num_waiting);
243 	}
244 	wake_up(&fc->waitq);
245 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
246 }
247 
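/*
 * Move requests from the background queue to the pending list, as
 * long as fewer than FUSE_MAX_BACKGROUND background requests are
 * active.  Called under fc->lock.
 */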
248 static void flush_bg_queue(struct fuse_conn *fc)
249 {
250 	while (fc->active_background < FUSE_MAX_BACKGROUND &&
251 	       !list_empty(&fc->bg_queue)) {
252 		struct fuse_req *req;
253 
254 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
255 		list_del(&req->list);
256 		fc->active_background++;
257 		queue_request(fc, req);
258 	}
259 }
260 
261 /*
262  * This function is called when a request is finished.  Either a reply
263  * has arrived, or the request was aborted (and not yet sent), or some
264  * error occurred during communication with userspace, or the device
265  * file was closed.  The requester thread is woken up (if still
266  * waiting), the 'end' callback is called if given, else the reference
267  * to the request is released.
268  *
269  * Called with fc->lock held; unlocks it.
270  */
271 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
272 __releases(&fc->lock)
273 {
274 	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
275 	req->end = NULL;
276 	list_del(&req->list);
277 	list_del(&req->intr_entry);
278 	req->state = FUSE_REQ_FINISHED;
279 	if (req->background) {
280 		if (fc->num_background == FUSE_MAX_BACKGROUND) {
281 			fc->blocked = 0;
282 			wake_up_all(&fc->blocked_waitq);
283 		}
284 		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
285 			clear_bdi_congested(&fc->bdi, READ);
286 			clear_bdi_congested(&fc->bdi, WRITE);
287 		}
288 		fc->num_background--;
289 		fc->active_background--;
290 		flush_bg_queue(fc);
291 	}
292 	spin_unlock(&fc->lock);
293 	wake_up(&req->waitq);
294 	if (end)
295 		end(fc, req);
296 	fuse_put_request(fc, req);
297 }
298 
299 static void wait_answer_interruptible(struct fuse_conn *fc,
300 				      struct fuse_req *req)
301 __releases(&fc->lock)
302 __acquires(&fc->lock)
303 {
304 	if (signal_pending(current))
305 		return;
306 
307 	spin_unlock(&fc->lock);
308 	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
309 	spin_lock(&fc->lock);
310 }
311 
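/*
 * Queue an INTERRUPT request for req and wake up the device reader.
 * Called under fc->lock.
 */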
312 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
313 {
314 	list_add_tail(&req->intr_entry, &fc->interrupts);
315 	wake_up(&fc->waitq);
316 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
317 }
318 
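/*
 * Wait for the request to be answered.  First wait interruptibly so
 * that an INTERRUPT can be sent to userspace; then, unless the
 * request was forced, wait blocking everything but fatal signals;
 * finally wait uninterruptibly for the answer.
 */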
319 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
320 __releases(&fc->lock)
321 __acquires(&fc->lock)
322 {
323 	if (!fc->no_interrupt) {
324 		/* Any signal may interrupt this */
325 		wait_answer_interruptible(fc, req);
326 
327 		if (req->aborted)
328 			goto aborted;
329 		if (req->state == FUSE_REQ_FINISHED)
330 			return;
331 
332 		req->interrupted = 1;
333 		if (req->state == FUSE_REQ_SENT)
334 			queue_interrupt(fc, req);
335 	}
336 
337 	if (!req->force) {
338 		sigset_t oldset;
339 
340 		/* Only fatal signals may interrupt this */
341 		block_sigs(&oldset);
342 		wait_answer_interruptible(fc, req);
343 		restore_sigs(&oldset);
344 
345 		if (req->aborted)
346 			goto aborted;
347 		if (req->state == FUSE_REQ_FINISHED)
348 			return;
349 
350 		/* Request is not yet in userspace, bail out */
351 		if (req->state == FUSE_REQ_PENDING) {
352 			list_del(&req->list);
353 			__fuse_put_request(req);
354 			req->out.h.error = -EINTR;
355 			return;
356 		}
357 	}
358 
359 	/*
360 	 * Either the request is already in userspace, or it was forced.
361 	 * Wait it out.
362 	 */
363 	spin_unlock(&fc->lock);
364 	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
365 	spin_lock(&fc->lock);
366 
367 	if (!req->aborted)
368 		return;
369 
370  aborted:
371 	BUG_ON(req->state != FUSE_REQ_FINISHED);
372 	if (req->locked) {
373 		/* This is uninterruptible sleep, because data is
374 		   being copied to/from the buffers of req.  During
375 		   locked state, there mustn't be any filesystem
376 		   operation (e.g. page fault), since that could lead
377 		   to deadlock */
378 		spin_unlock(&fc->lock);
379 		wait_event(req->waitq, !req->locked);
380 		spin_lock(&fc->lock);
381 	}
382 }
383 
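/* Send a request and wait for the answer */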
384 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
385 {
386 	req->isreply = 1;
387 	spin_lock(&fc->lock);
388 	if (!fc->connected)
389 		req->out.h.error = -ENOTCONN;
390 	else if (fc->conn_error)
391 		req->out.h.error = -ECONNREFUSED;
392 	else {
393 		queue_request(fc, req);
394 		/* acquire extra reference, since request is still needed
395 		   after request_end() */
396 		__fuse_get_request(req);
397 
398 		request_wait_answer(fc, req);
399 	}
400 	spin_unlock(&fc->lock);
401 }
402 
403 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
404 					    struct fuse_req *req)
405 {
406 	req->background = 1;
407 	fc->num_background++;
408 	if (fc->num_background == FUSE_MAX_BACKGROUND)
409 		fc->blocked = 1;
410 	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
411 		set_bdi_congested(&fc->bdi, READ);
412 		set_bdi_congested(&fc->bdi, WRITE);
413 	}
414 	list_add_tail(&req->list, &fc->bg_queue);
415 	flush_bg_queue(fc);
416 }
417 
418 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
419 {
420 	spin_lock(&fc->lock);
421 	if (fc->connected) {
422 		fuse_request_send_nowait_locked(fc, req);
423 		spin_unlock(&fc->lock);
424 	} else {
425 		req->out.h.error = -ENOTCONN;
426 		request_end(fc, req);
427 	}
428 }
429 
430 void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
431 {
432 	req->isreply = 0;
433 	fuse_request_send_nowait(fc, req);
434 }
435 
436 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
437 {
438 	req->isreply = 1;
439 	fuse_request_send_nowait(fc, req);
440 }
441 
442 /*
443  * Called under fc->lock
444  *
445  * fc->connected must have been checked previously
446  */
447 void fuse_request_send_background_locked(struct fuse_conn *fc,
448 					 struct fuse_req *req)
449 {
450 	req->isreply = 1;
451 	fuse_request_send_nowait_locked(fc, req);
452 }
453 
454 /*
455  * Lock the request.  Up to the next unlock_request() there mustn't be
456  * anything that could cause a page fault.  If the request was already
457  * aborted, bail out.
458  */
459 static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
460 {
461 	int err = 0;
462 	if (req) {
463 		spin_lock(&fc->lock);
464 		if (req->aborted)
465 			err = -ENOENT;
466 		else
467 			req->locked = 1;
468 		spin_unlock(&fc->lock);
469 	}
470 	return err;
471 }
472 
473 /*
474  * Unlock request.  If it was aborted while locked, the
475  * requester thread is currently waiting for it to be unlocked, so
476  * wake it up.
477  */
478 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
479 {
480 	if (req) {
481 		spin_lock(&fc->lock);
482 		req->locked = 0;
483 		if (req->aborted)
484 			wake_up(&req->waitq);
485 		spin_unlock(&fc->lock);
486 	}
487 }
488 
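/*
 * State for copying data between the request and the userspace
 * buffer described by an iovec, one page at a time.
 */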
489 struct fuse_copy_state {
490 	struct fuse_conn *fc;
491 	int write;
492 	struct fuse_req *req;
493 	const struct iovec *iov;
494 	unsigned long nr_segs;
495 	unsigned long seglen;
496 	unsigned long addr;
497 	struct page *pg;
498 	void *mapaddr;
499 	void *buf;
500 	unsigned len;
501 };
502 
503 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
504 			   int write, struct fuse_req *req,
505 			   const struct iovec *iov, unsigned long nr_segs)
506 {
507 	memset(cs, 0, sizeof(*cs));
508 	cs->fc = fc;
509 	cs->write = write;
510 	cs->req = req;
511 	cs->iov = iov;
512 	cs->nr_segs = nr_segs;
513 }
514 
515 /* Unmap and put previous page of userspace buffer */
516 static void fuse_copy_finish(struct fuse_copy_state *cs)
517 {
518 	if (cs->mapaddr) {
519 		kunmap_atomic(cs->mapaddr, KM_USER0);
520 		if (cs->write) {
521 			flush_dcache_page(cs->pg);
522 			set_page_dirty_lock(cs->pg);
523 		}
524 		put_page(cs->pg);
525 		cs->mapaddr = NULL;
526 	}
527 }
528 
529 /*
530  * Get another pageful of the userspace buffer, map it to kernel
531  * address space, and lock the request.
532  */
533 static int fuse_copy_fill(struct fuse_copy_state *cs)
534 {
535 	unsigned long offset;
536 	int err;
537 
538 	unlock_request(cs->fc, cs->req);
539 	fuse_copy_finish(cs);
540 	if (!cs->seglen) {
541 		BUG_ON(!cs->nr_segs);
542 		cs->seglen = cs->iov[0].iov_len;
543 		cs->addr = (unsigned long) cs->iov[0].iov_base;
544 		cs->iov++;
545 		cs->nr_segs--;
546 	}
547 	down_read(&current->mm->mmap_sem);
548 	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
549 			     &cs->pg, NULL);
550 	up_read(&current->mm->mmap_sem);
551 	if (err < 0)
552 		return err;
553 	BUG_ON(err != 1);
554 	offset = cs->addr % PAGE_SIZE;
555 	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
556 	cs->buf = cs->mapaddr + offset;
557 	cs->len = min(PAGE_SIZE - offset, cs->seglen);
558 	cs->seglen -= cs->len;
559 	cs->addr += cs->len;
560 
561 	return lock_request(cs->fc, cs->req);
562 }
563 
564 /* Do as much copy to/from userspace buffer as we can */
565 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
566 {
567 	unsigned ncpy = min(*size, cs->len);
568 	if (val) {
569 		if (cs->write)
570 			memcpy(cs->buf, *val, ncpy);
571 		else
572 			memcpy(*val, cs->buf, ncpy);
573 		*val += ncpy;
574 	}
575 	*size -= ncpy;
576 	cs->len -= ncpy;
577 	cs->buf += ncpy;
578 	return ncpy;
579 }
580 
581 /*
582  * Copy a page in the request to/from the userspace buffer.  Must be
583  * done atomically.
584  */
585 static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
586 			  unsigned offset, unsigned count, int zeroing)
587 {
588 	if (page && zeroing && count < PAGE_SIZE) {
589 		void *mapaddr = kmap_atomic(page, KM_USER1);
590 		memset(mapaddr, 0, PAGE_SIZE);
591 		kunmap_atomic(mapaddr, KM_USER1);
592 	}
593 	while (count) {
594 		if (!cs->len) {
595 			int err = fuse_copy_fill(cs);
596 			if (err)
597 				return err;
598 		}
599 		if (page) {
600 			void *mapaddr = kmap_atomic(page, KM_USER1);
601 			void *buf = mapaddr + offset;
602 			offset += fuse_copy_do(cs, &buf, &count);
603 			kunmap_atomic(mapaddr, KM_USER1);
604 		} else
605 			offset += fuse_copy_do(cs, NULL, &count);
606 	}
607 	if (page && !cs->write)
608 		flush_dcache_page(page);
609 	return 0;
610 }
611 
612 /* Copy pages in the request to/from userspace buffer */
613 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
614 			   int zeroing)
615 {
616 	unsigned i;
617 	struct fuse_req *req = cs->req;
618 	unsigned offset = req->page_offset;
619 	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
620 
621 	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
622 		struct page *page = req->pages[i];
623 		int err = fuse_copy_page(cs, page, offset, count, zeroing);
624 		if (err)
625 			return err;
626 
627 		nbytes -= count;
628 		count = min(nbytes, (unsigned) PAGE_SIZE);
629 		offset = 0;
630 	}
631 	return 0;
632 }
633 
634 /* Copy a single argument in the request to/from userspace buffer */
635 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
636 {
637 	while (size) {
638 		if (!cs->len) {
639 			int err = fuse_copy_fill(cs);
640 			if (err)
641 				return err;
642 		}
643 		fuse_copy_do(cs, &val, &size);
644 	}
645 	return 0;
646 }
647 
648 /* Copy request arguments to/from userspace buffer */
649 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
650 			  unsigned argpages, struct fuse_arg *args,
651 			  int zeroing)
652 {
653 	int err = 0;
654 	unsigned i;
655 
656 	for (i = 0; !err && i < numargs; i++)  {
657 		struct fuse_arg *arg = &args[i];
658 		if (i == numargs - 1 && argpages)
659 			err = fuse_copy_pages(cs, arg->size, zeroing);
660 		else
661 			err = fuse_copy_one(cs, arg->value, arg->size);
662 	}
663 	return err;
664 }
665 
666 static int request_pending(struct fuse_conn *fc)
667 {
668 	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
669 }
670 
671 /* Wait until a request is available on the pending list */
672 static void request_wait(struct fuse_conn *fc)
673 __releases(&fc->lock)
674 __acquires(&fc->lock)
675 {
676 	DECLARE_WAITQUEUE(wait, current);
677 
678 	add_wait_queue_exclusive(&fc->waitq, &wait);
679 	while (fc->connected && !request_pending(fc)) {
680 		set_current_state(TASK_INTERRUPTIBLE);
681 		if (signal_pending(current))
682 			break;
683 
684 		spin_unlock(&fc->lock);
685 		schedule();
686 		spin_lock(&fc->lock);
687 	}
688 	set_current_state(TASK_RUNNING);
689 	remove_wait_queue(&fc->waitq, &wait);
690 }
691 
692 /*
693  * Transfer an interrupt request to userspace
694  *
695  * Unlike other requests, this is assembled on demand, without a need
696  * to allocate a separate fuse_req structure.
697  *
698  * Called with fc->lock held, releases it
699  */
700 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
701 			       const struct iovec *iov, unsigned long nr_segs)
702 __releases(&fc->lock)
703 {
704 	struct fuse_copy_state cs;
705 	struct fuse_in_header ih;
706 	struct fuse_interrupt_in arg;
707 	unsigned reqsize = sizeof(ih) + sizeof(arg);
708 	int err;
709 
710 	list_del_init(&req->intr_entry);
711 	req->intr_unique = fuse_get_unique(fc);
712 	memset(&ih, 0, sizeof(ih));
713 	memset(&arg, 0, sizeof(arg));
714 	ih.len = reqsize;
715 	ih.opcode = FUSE_INTERRUPT;
716 	ih.unique = req->intr_unique;
717 	arg.unique = req->in.h.unique;
718 
719 	spin_unlock(&fc->lock);
720 	if (iov_length(iov, nr_segs) < reqsize)
721 		return -EINVAL;
722 
723 	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
724 	err = fuse_copy_one(&cs, &ih, sizeof(ih));
725 	if (!err)
726 		err = fuse_copy_one(&cs, &arg, sizeof(arg));
727 	fuse_copy_finish(&cs);
728 
729 	return err ? err : reqsize;
730 }
731 
732 /*
733  * Read a single request into the userspace filesystem's buffer.  This
734  * function waits until a request is available, then removes it from
735  * the pending list and copies the request data to the userspace
736  * buffer.  If no reply is needed (FORGET), or the request has been
737  * aborted, or there was an error during the copy, it is finished by
738  * calling request_end().  Otherwise add it to the processing list and
739  * set the 'sent' flag.
740  */
741 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
742 			      unsigned long nr_segs, loff_t pos)
743 {
744 	int err;
745 	struct fuse_req *req;
746 	struct fuse_in *in;
747 	struct fuse_copy_state cs;
748 	unsigned reqsize;
749 	struct file *file = iocb->ki_filp;
750 	struct fuse_conn *fc = fuse_get_conn(file);
751 	if (!fc)
752 		return -EPERM;
753 
754  restart:
755 	spin_lock(&fc->lock);
756 	err = -EAGAIN;
757 	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
758 	    !request_pending(fc))
759 		goto err_unlock;
760 
761 	request_wait(fc);
762 	err = -ENODEV;
763 	if (!fc->connected)
764 		goto err_unlock;
765 	err = -ERESTARTSYS;
766 	if (!request_pending(fc))
767 		goto err_unlock;
768 
769 	if (!list_empty(&fc->interrupts)) {
770 		req = list_entry(fc->interrupts.next, struct fuse_req,
771 				 intr_entry);
772 		return fuse_read_interrupt(fc, req, iov, nr_segs);
773 	}
774 
775 	req = list_entry(fc->pending.next, struct fuse_req, list);
776 	req->state = FUSE_REQ_READING;
777 	list_move(&req->list, &fc->io);
778 
779 	in = &req->in;
780 	reqsize = in->h.len;
781 	/* If request is too large, reply with an error and restart the read */
782 	if (iov_length(iov, nr_segs) < reqsize) {
783 		req->out.h.error = -EIO;
784 		/* SETXATTR is special, since its data may be too large */
785 		if (in->h.opcode == FUSE_SETXATTR)
786 			req->out.h.error = -E2BIG;
787 		request_end(fc, req);
788 		goto restart;
789 	}
790 	spin_unlock(&fc->lock);
791 	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
792 	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
793 	if (!err)
794 		err = fuse_copy_args(&cs, in->numargs, in->argpages,
795 				     (struct fuse_arg *) in->args, 0);
796 	fuse_copy_finish(&cs);
797 	spin_lock(&fc->lock);
798 	req->locked = 0;
799 	if (req->aborted) {
800 		request_end(fc, req);
801 		return -ENODEV;
802 	}
803 	if (err) {
804 		req->out.h.error = -EIO;
805 		request_end(fc, req);
806 		return err;
807 	}
808 	if (!req->isreply)
809 		request_end(fc, req);
810 	else {
811 		req->state = FUSE_REQ_SENT;
812 		list_move_tail(&req->list, &fc->processing);
813 		if (req->interrupted)
814 			queue_interrupt(fc, req);
815 		spin_unlock(&fc->lock);
816 	}
817 	return reqsize;
818 
819  err_unlock:
820 	spin_unlock(&fc->lock);
821 	return err;
822 }
823 
824 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
825 			    struct fuse_copy_state *cs)
826 {
827 	struct fuse_notify_poll_wakeup_out outarg;
828 	int err;
829 
830 	if (size != sizeof(outarg))
831 		return -EINVAL;
832 
833 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
834 	if (err)
835 		return err;
836 
837 	return fuse_notify_poll_wakeup(fc, &outarg);
838 }
839 
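/* Dispatch an unsolicited notification message by its code */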
840 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
841 		       unsigned int size, struct fuse_copy_state *cs)
842 {
843 	switch (code) {
844 	case FUSE_NOTIFY_POLL:
845 		return fuse_notify_poll(fc, size, cs);
846 
847 	default:
848 		return -EINVAL;
849 	}
850 }
851 
852 /* Look up request on processing list by unique ID */
853 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
854 {
855 	struct list_head *entry;
856 
857 	list_for_each(entry, &fc->processing) {
858 		struct fuse_req *req;
859 		req = list_entry(entry, struct fuse_req, list);
860 		if (req->in.h.unique == unique || req->intr_unique == unique)
861 			return req;
862 	}
863 	return NULL;
864 }
865 
866 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
867 			 unsigned nbytes)
868 {
869 	unsigned reqsize = sizeof(struct fuse_out_header);
870 
871 	if (out->h.error)
872 		return nbytes != reqsize ? -EINVAL : 0;
873 
874 	reqsize += len_args(out->numargs, out->args);
875 
876 	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
877 		return -EINVAL;
878 	else if (reqsize > nbytes) {
879 		struct fuse_arg *lastarg = &out->args[out->numargs-1];
880 		unsigned diffsize = reqsize - nbytes;
881 		if (diffsize > lastarg->size)
882 			return -EINVAL;
883 		lastarg->size -= diffsize;
884 	}
885 	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
886 			      out->page_zeroing);
887 }
888 
889 /*
890  * Write a single reply to a request.  First the header is copied from
891  * the write buffer.  The request is then looked up on the processing
892  * list by the unique ID found in the header.  If found, remove it
893  * from the list and copy the rest of the buffer to the request.
894  * The request is finished by calling request_end().
895  */
896 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
897 			       unsigned long nr_segs, loff_t pos)
898 {
899 	int err;
900 	unsigned nbytes = iov_length(iov, nr_segs);
901 	struct fuse_req *req;
902 	struct fuse_out_header oh;
903 	struct fuse_copy_state cs;
904 	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
905 	if (!fc)
906 		return -EPERM;
907 
908 	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
909 	if (nbytes < sizeof(struct fuse_out_header))
910 		return -EINVAL;
911 
912 	err = fuse_copy_one(&cs, &oh, sizeof(oh));
913 	if (err)
914 		goto err_finish;
915 
916 	err = -EINVAL;
917 	if (oh.len != nbytes)
918 		goto err_finish;
919 
920 	/*
921 	 * Zero oh.unique indicates an unsolicited notification message,
922 	 * and the error field contains the notification code.
923 	 */
924 	if (!oh.unique) {
925 		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
926 		fuse_copy_finish(&cs);
927 		return err ? err : nbytes;
928 	}
929 
930 	err = -EINVAL;
931 	if (oh.error <= -1000 || oh.error > 0)
932 		goto err_finish;
933 
934 	spin_lock(&fc->lock);
935 	err = -ENOENT;
936 	if (!fc->connected)
937 		goto err_unlock;
938 
939 	req = request_find(fc, oh.unique);
940 	if (!req)
941 		goto err_unlock;
942 
943 	if (req->aborted) {
944 		spin_unlock(&fc->lock);
945 		fuse_copy_finish(&cs);
946 		spin_lock(&fc->lock);
947 		request_end(fc, req);
948 		return -ENOENT;
949 	}
950 	/* Is it an interrupt reply? */
951 	if (req->intr_unique == oh.unique) {
952 		err = -EINVAL;
953 		if (nbytes != sizeof(struct fuse_out_header))
954 			goto err_unlock;
955 
956 		if (oh.error == -ENOSYS)
957 			fc->no_interrupt = 1;
958 		else if (oh.error == -EAGAIN)
959 			queue_interrupt(fc, req);
960 
961 		spin_unlock(&fc->lock);
962 		fuse_copy_finish(&cs);
963 		return nbytes;
964 	}
965 
966 	req->state = FUSE_REQ_WRITING;
967 	list_move(&req->list, &fc->io);
968 	req->out.h = oh;
969 	req->locked = 1;
970 	cs.req = req;
971 	spin_unlock(&fc->lock);
972 
973 	err = copy_out_args(&cs, &req->out, nbytes);
974 	fuse_copy_finish(&cs);
975 
976 	spin_lock(&fc->lock);
977 	req->locked = 0;
978 	if (!err) {
979 		if (req->aborted)
980 			err = -ENOENT;
981 	} else if (!req->aborted)
982 		req->out.h.error = -EIO;
983 	request_end(fc, req);
984 
985 	return err ? err : nbytes;
986 
987  err_unlock:
988 	spin_unlock(&fc->lock);
989  err_finish:
990 	fuse_copy_finish(&cs);
991 	return err;
992 }
993 
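/*
 * The device is always writable; it is readable only when a request
 * or an interrupt is pending.
 */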
994 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
995 {
996 	unsigned mask = POLLOUT | POLLWRNORM;
997 	struct fuse_conn *fc = fuse_get_conn(file);
998 	if (!fc)
999 		return POLLERR;
1000 
1001 	poll_wait(file, &fc->waitq, wait);
1002 
1003 	spin_lock(&fc->lock);
1004 	if (!fc->connected)
1005 		mask = POLLERR;
1006 	else if (request_pending(fc))
1007 		mask |= POLLIN | POLLRDNORM;
1008 	spin_unlock(&fc->lock);
1009 
1010 	return mask;
1011 }
1012 
1013 /*
1014  * Abort all requests on the given list (pending or processing)
1015  *
1016  * This function releases and reacquires fc->lock
1017  */
1018 static void end_requests(struct fuse_conn *fc, struct list_head *head)
1019 __releases(&fc->lock)
1020 __acquires(&fc->lock)
1021 {
1022 	while (!list_empty(head)) {
1023 		struct fuse_req *req;
1024 		req = list_entry(head->next, struct fuse_req, list);
1025 		req->out.h.error = -ECONNABORTED;
1026 		request_end(fc, req);
1027 		spin_lock(&fc->lock);
1028 	}
1029 }
1030 
1031 /*
1032  * Abort requests under I/O
1033  *
1034  * The requests are set to aborted and finished, and the request
1035  * waiter is woken up.  This will make request_wait_answer() wait
1036  * until the request is unlocked and then return.
1037  *
1038  * If the request is asynchronous, then the end function needs to be
1039  * called after waiting for the request to be unlocked (if it was
1040  * locked).
1041  */
1042 static void end_io_requests(struct fuse_conn *fc)
1043 __releases(&fc->lock)
1044 __acquires(&fc->lock)
1045 {
1046 	while (!list_empty(&fc->io)) {
1047 		struct fuse_req *req =
1048 			list_entry(fc->io.next, struct fuse_req, list);
1049 		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
1050 
1051 		req->aborted = 1;
1052 		req->out.h.error = -ECONNABORTED;
1053 		req->state = FUSE_REQ_FINISHED;
1054 		list_del_init(&req->list);
1055 		wake_up(&req->waitq);
1056 		if (end) {
1057 			req->end = NULL;
1058 			__fuse_get_request(req);
1059 			spin_unlock(&fc->lock);
1060 			wait_event(req->waitq, !req->locked);
1061 			end(fc, req);
1062 			fuse_put_request(fc, req);
1063 			spin_lock(&fc->lock);
1064 		}
1065 	}
1066 }
1067 
1068 /*
1069  * Abort all requests.
1070  *
1071  * Emergency exit in case of a malicious or accidental deadlock, or
1072  * just a hung filesystem.
1073  *
1074  * The same effect is usually achievable through killing the
1075  * filesystem daemon and all users of the filesystem.  The exception
1076  * is the combination of an asynchronous request and the tricky
1077  * deadlock (see Documentation/filesystems/fuse.txt).
1078  *
1079  * During the aborting, progression of requests from the pending and
1080  * processing lists onto the io list, and progression of new requests
1081  * onto the pending list is prevented by fc->connected being false.
1082  *
1083  * Progression of requests under I/O to the processing list is
1084  * prevented by the req->aborted flag being true for these requests.
1085  * For this reason requests on the io list must be aborted first.
1086  */
1087 void fuse_abort_conn(struct fuse_conn *fc)
1088 {
1089 	spin_lock(&fc->lock);
1090 	if (fc->connected) {
1091 		fc->connected = 0;
1092 		fc->blocked = 0;
1093 		end_io_requests(fc);
1094 		end_requests(fc, &fc->pending);
1095 		end_requests(fc, &fc->processing);
1096 		wake_up_all(&fc->waitq);
1097 		wake_up_all(&fc->blocked_waitq);
1098 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
1099 	}
1100 	spin_unlock(&fc->lock);
1101 }
1102 
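/*
 * Called on the final close of the device file: mark the connection
 * as dead and fail all requests still on the pending and processing
 * lists.
 */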
1103 static int fuse_dev_release(struct inode *inode, struct file *file)
1104 {
1105 	struct fuse_conn *fc = fuse_get_conn(file);
1106 	if (fc) {
1107 		spin_lock(&fc->lock);
1108 		fc->connected = 0;
1109 		end_requests(fc, &fc->pending);
1110 		end_requests(fc, &fc->processing);
1111 		spin_unlock(&fc->lock);
1112 		fuse_conn_put(fc);
1113 	}
1114 
1115 	return 0;
1116 }
1117 
1118 static int fuse_dev_fasync(int fd, struct file *file, int on)
1119 {
1120 	struct fuse_conn *fc = fuse_get_conn(file);
1121 	if (!fc)
1122 		return -EPERM;
1123 
1124 	/* No locking - fasync_helper does its own locking */
1125 	return fasync_helper(fd, file, on, &fc->fasync);
1126 }
1127 
1128 const struct file_operations fuse_dev_operations = {
1129 	.owner		= THIS_MODULE,
1130 	.llseek		= no_llseek,
1131 	.read		= do_sync_read,
1132 	.aio_read	= fuse_dev_read,
1133 	.write		= do_sync_write,
1134 	.aio_write	= fuse_dev_write,
1135 	.poll		= fuse_dev_poll,
1136 	.release	= fuse_dev_release,
1137 	.fasync		= fuse_dev_fasync,
1138 };
1139 
1140 static struct miscdevice fuse_miscdevice = {
1141 	.minor = FUSE_MINOR,
1142 	.name  = "fuse",
1143 	.fops = &fuse_dev_operations,
1144 };
1145 
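/* Create the request slab cache and register the /dev/fuse misc device */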
1146 int __init fuse_dev_init(void)
1147 {
1148 	int err = -ENOMEM;
1149 	fuse_req_cachep = kmem_cache_create("fuse_request",
1150 					    sizeof(struct fuse_req),
1151 					    0, 0, NULL);
1152 	if (!fuse_req_cachep)
1153 		goto out;
1154 
1155 	err = misc_register(&fuse_miscdevice);
1156 	if (err)
1157 		goto out_cache_clean;
1158 
1159 	return 0;
1160 
1161  out_cache_clean:
1162 	kmem_cache_destroy(fuse_req_cachep);
1163  out:
1164 	return err;
1165 }
1166 
1167 void fuse_dev_cleanup(void)
1168 {
1169 	misc_deregister(&fuse_miscdevice);
1170 	kmem_cache_destroy(fuse_req_cachep);
1171 }
1172