/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The maximum size to which a non-root user may grow a pipe. Root can
 * raise the limit via /proc/sys/fs/pipe-max-size.
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default per-pipe allocation times the default
 * open-file limit.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

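/*
 * Worked example (illustration only, assuming 4 KiB pages and the usual
 * PIPE_DEF_BUFFERS = 16, INR_OPEN_CUR = 1024): the soft limit defaults to
 * 16 * 1024 = 16384 pages, i.e. 64 MiB of pipe buffers per user before
 * new pipes are clamped to a single page in alloc_pipe_info() below.
 */
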
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

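/*
 * Illustration only (not from the source): a sketch of how a caller such
 * as the tee()/splice() path might take both pipe locks. Because
 * pipe_double_lock() orders the locks by address, two tasks locking the
 * same pair of pipes in opposite argument order cannot deadlock.
 *
 *	static void copy_between(struct pipe_inode_info *in,
 *				 struct pipe_inode_info *out)
 *	{
 *		pipe_double_lock(in, out);
 *		// ... move or duplicate buffers between the two pipes ...
 *		pipe_unlock(in);
 *		pipe_unlock(out);
 *	}
 */
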
/* Drop the pipe lock and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

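/*
 * Illustration only: the canonical caller pattern, assuming the pipe lock
 * is already held. pipe_wait() drops the lock, sleeps until woken, and
 * re-takes the lock, so the caller must re-check its condition on return:
 *
 *	while (!condition(pipe)) {
 *		if (signal_pending(current))
 *			break;
 *		pipe_wait(pipe);
 *	}
 */
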
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled()) {
			memcg_kmem_uncharge(page, 0);
			__ClearPageKmemcg(page);
		}
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 with the page
 *	locked. The caller may then reuse the page for whatever they
 *	wish; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of
	 * this page is the only one holding a reference to it. Lock the
	 * page and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

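/*
 * Identical to anon_pipe_buf_ops except that packet buffers must never
 * be merged with a later write (see is_packetized() below).
 */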
static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

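/*
 * Illustration only: what the loop above means for userspace readers,
 * assuming the read end was opened with O_NONBLOCK. With no buffered data
 * and a live writer, read() fails with EAGAIN instead of sleeping; once
 * every writer has closed its end, read() returns 0 (EOF).
 *
 *	char buf[128];
 *	ssize_t n = read(fds[0], buf, sizeof(buf));
 *	if (n < 0 && errno == EAGAIN)
 *		;	// pipe empty, writer still present
 *	else if (n == 0)
 *		;	// all writers gone: end of file
 */
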
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

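/*
 * Illustration only: packet mode as seen from userspace, assuming the
 * pipe was created with pipe2(fds, O_DIRECT). Each write() becomes one
 * packet; a read() returns at most one packet, and the unread remainder
 * of that packet is discarded (the PIPE_BUF_FLAG_PACKET handling in
 * pipe_read() above).
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "hello", 5);
 *	write(fds[1], "world", 5);
 *	char buf[64];
 *	read(fds[0], buf, 2);	// returns 2 ("he"), "llo" is dropped
 *	read(fds[0], buf, 64);	// returns 5 ("world")
 */
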
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = ops->confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}

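/*
 * Illustration only: the userspace view of FIONREAD, assuming
 * <sys/ioctl.h>. It reports how many bytes are currently buffered in
 * the pipe.
 *
 *	int avail;
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes ready to read\n", avail);
 */
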
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need to acquire the pipe lock. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

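/*
 * Illustration only: a minimal userspace poll() on both ends of a pipe,
 * assuming <poll.h>. POLLHUP on the read side and POLLERR on the write
 * side mirror the checks above.
 *
 *	struct pollfd pfd[2] = {
 *		{ .fd = fds[0], .events = POLLIN  },
 *		{ .fd = fds[1], .events = POLLOUT },
 *	};
 *	poll(pfd, 2, -1);	// wait for readable/writable/hangup
 */
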
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen when on is true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static void account_pipe_buffers(struct pipe_inode_info *pipe,
                                 unsigned long old, unsigned long new)
{
	atomic_long_add(new - old, &pipe->user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(struct user_struct *user)
{
	return pipe_user_pages_soft &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(struct user_struct *user)
{
	return pipe_user_pages_hard &&
	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe) {
		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
		struct user_struct *user = get_current_user();

		if (!too_many_pipe_buffers_hard(user)) {
			if (too_many_pipe_buffers_soft(user))
				pipe_bufs = 1;
			pipe->bufs = kcalloc(pipe_bufs,
					     sizeof(struct pipe_buffer),
					     GFP_KERNEL_ACCOUNT);
		}

		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->buffers = pipe_bufs;
			pipe->user = user;
			account_pipe_buffers(pipe, 0, pipe_bufs);
			mutex_init(&pipe->mutex);
			return pipe;
		}
		free_uid(user);
		kfree(pipe);
	}

	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	account_pipe_buffers(pipe, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;
	static struct qstr name = { .name = "" };

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f)) {
		err = PTR_ERR(f);
		goto err_dentry;
	}

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0])) {
		err = PTR_ERR(res[0]);
		goto err_file;
	}

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling convention for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}

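/*
 * Illustration only: the classic use of these syscalls from userspace,
 * assuming <unistd.h>. fd[0] is the read end, fd[1] the write end.
 *
 *	int fd[2];
 *	if (pipe(fd) == 0 && fork() == 0) {
 *		close(fd[1]);			// child: reader
 *		char c;
 *		while (read(fd[0], &c, 1) == 1)
 *			putchar(c);
 *		_exit(0);
 *	}
 *	close(fd[0]);				// parent: writer
 *	write(fd[1], "hi\n", 3);
 *	close(fd[1]);
 */
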
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

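/*
 * Illustration only: FIFO open semantics from userspace, assuming a FIFO
 * created with mkfifo() at a hypothetical path. These calls map onto the
 * three cases above.
 *
 *	mkfifo("/tmp/example_fifo", 0600);
 *	// Blocks until a writer shows up:
 *	int r  = open("/tmp/example_fifo", O_RDONLY);
 *	// Succeeds immediately even with no writer:
 *	int rn = open("/tmp/example_fifo", O_RDONLY | O_NONBLOCK);
 *	// Fails with ENXIO while no reader exists:
 *	int wn = open("/tmp/example_fifo", O_WRONLY | O_NONBLOCK);
 *	// Never blocks, per the O_RDWR case:
 *	int rw = open("/tmp/example_fifo", O_RDWR);
 */
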
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the new pipe size if successful, or a negative error code on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}

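/*
 * Worked example (illustration only) of the unwrap above: with
 * buffers = 8, curbuf = 6 and nrbufs = 4, the live buffers occupy slots
 * 6, 7, 0, 1. Then tail = (6 + 4) & 7 = 2 slots wrapped to the front and
 * head = 4 - 2 = 2 slots at the end, so the first memcpy moves slots 6..7
 * to bufs[0..1] and the second moves slots 0..1 to bufs[2..3].
 */
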
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

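/*
 * Worked example (illustration only, assuming 4 KiB pages, PAGE_SHIFT
 * of 12): a request of 10000 bytes needs (10000 + 4095) >> 12 = 3 pages,
 * rounded up to the power of two 4, so round_pipe_size() returns
 * 4 << 12 = 16384 bytes.
 */
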
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
			    too_many_pipe_buffers_soft(pipe->user)) &&
			   !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
		}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	__pipe_unlock(pipe);
	return ret;
}

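/*
 * Illustration only: resizing a pipe from userspace, assuming <fcntl.h>
 * with F_SETPIPE_SZ/F_GETPIPE_SZ and 4 KiB pages. The kernel rounds the
 * request up via round_pipe_size(), so asking for 100000 bytes yields
 * 25 pages rounded to 32, i.e. 131072 bytes.
 *
 *	long sz  = fcntl(fds[1], F_SETPIPE_SZ, 100000);	// sz == 131072
 *	long cur = fcntl(fds[1], F_GETPIPE_SZ, 0);	// cur == 131072
 *	// EPERM if the request exceeds /proc/sys/fs/pipe-max-size for an
 *	// unprivileged caller
 */
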
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);