// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
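
/*
 * Usage sketch (illustrative, not a real caller): code that must hold two
 * pipes at once, such as tee(2) in fs/splice.c, avoids ABBA deadlocks by
 * relying on the address ordering above:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... move or duplicate buffers between the two pipes ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 *
 * Only the lock order matters; the unlock order does not.
 */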

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
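
/*
 * Usage sketch (illustrative): because prepare_to_wait() runs before
 * pipe_unlock(), a wakeup arriving between the unlock and schedule() is
 * not lost.  Callers hold the pipe lock and recheck their condition in a
 * loop:
 *
 *	__pipe_lock(pipe);
 *	while (!data_available(pipe)) {		// hypothetical predicate
 *		if (signal_pending(current))
 *			break;
 *		pipe_wait(pipe);		// drops and re-takes the lock
 *	}
 *	__pipe_unlock(pipe);
 */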

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 with the page
 *	locked. The caller may then reuse the page for whatever it
 *	wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of
	 * this page is the only one holding a reference to it. Lock the
	 * page and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
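
/*
 * Caller's-eye sketch (illustrative): stealing is how splice can move a
 * page into a file's page cache instead of copying it.
 *
 *	if (pipe_buf_steal(pipe, buf) == 0) {
 *		// buf->page is locked and exclusively ours; e.g. insert
 *		// it into an address_space, then unlock it.
 *	} else {
 *		// someone else holds a reference; fall back to copying.
 *	}
 */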

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
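
/*
 * Userspace-facing sketch: opening a pipe with O_DIRECT selects the
 * non-merging packet_pipe_buf_ops above, so each write() produces one
 * packet and a read() consumes at most one packet (see the
 * PIPE_BUF_FLAG_PACKET handling in pipe_read()).  Illustrative only:
 *
 *	int fds[2];
 *	char buf[64];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);			// packet 1
 *	write(fds[1], "cdef", 4);		// packet 2
 *	read(fds[0], buf, sizeof(buf));		// returns 2, not 6
 */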

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
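
/*
 * Behavioural sketch of the merge path above, seen from userspace: on a
 * regular (non-O_DIRECT) pipe, short writes that still fit in the last
 * buffer's page are coalesced, so a later read() may return both at once.
 *
 *	int fds[2];
 *	char buf[16];
 *
 *	pipe(fds);
 *	write(fds[1], "foo", 3);
 *	write(fds[1], "bar", 3);		// merged into the same page
 *	read(fds[0], buf, sizeof(buf));		// may return all 6 bytes
 */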

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
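
/*
 * Userspace sketch: FIONREAD is the only ioctl handled here; it reports
 * the number of bytes currently buffered across all in-use slots.
 *
 *	int avail;
 *
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes ready\n", avail);
 */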

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen when on is true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
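
/*
 * Userspace sketch: the fasync lists maintained above feed the
 * kill_fasync() calls in pipe_read()/pipe_write().  A reader typically
 * signs up for SIGIO with:
 *
 *	fcntl(fds[0], F_SETOWN, getpid());
 *	fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_ASYNC);
 *
 * after which SIGIO is delivered whenever the writer makes data available.
 */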

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
}
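
/*
 * Worked example, assuming PIPE_DEF_BUFFERS == 16, INR_OPEN_CUR == 1024
 * and 4 KiB pages: the default soft limit above is 16 * 1024 = 16384
 * pages, i.e. 64 MiB of pipe buffers per user, while the hard limit
 * defaults to 0, meaning unlimited.  Both are tunable via
 * /proc/sys/fs/pipe-user-pages-soft and /proc/sys/fs/pipe-user-pages-hard.
 */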

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = pipe_max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs)) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs))
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f)) {
		err = PTR_ERR(f);
		goto err_dentry;
	}

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0])) {
		err = PTR_ERR(res[0]);
		goto err_file;
	}

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
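
/*
 * Userspace sketch of the two entry points above: pipe() returns the read
 * end in fds[0] and the write end in fds[1]; pipe2() additionally accepts
 * O_CLOEXEC, O_NONBLOCK and O_DIRECT (see __do_pipe_flags()).
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		perror("pipe2");
 */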

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
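
/*
 * Behavioural sketch of the FIFO cases above, seen from userspace
 * ("/tmp/f" is just an illustrative path):
 *
 *	mkfifo("/tmp/f", 0600);
 *	open("/tmp/f", O_RDONLY | O_NONBLOCK);	// succeeds immediately
 *	open("/tmp/f", O_WRONLY | O_NONBLOCK);	// ENXIO if no reader yet
 *	open("/tmp/f", O_RDONLY);		// blocks until a writer opens
 *	open("/tmp/f", O_RDWR);			// never blocks
 */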

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	if (size < pipe_min_size)
		size = pipe_min_size;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages == 0)
		return 0;

	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
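
/*
 * Worked example, assuming 4 KiB pages: a request of 100000 bytes rounds
 * up to 25 pages, roundup_pow_of_two() turns that into 32, so the pipe is
 * sized to 32 << PAGE_SHIFT = 131072 bytes.
 */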

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	if (size == 0)
		return -EINVAL;
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	return proc_dopipe_max_size(table, write, buf, lenp, ppos);
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
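
/*
 * Userspace sketch of the fcntl interface above:
 *
 *	long sz = fcntl(fds[1], F_GETPIPE_SZ);		// current capacity
 *
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)	// request 1 MiB
 *		perror("F_SETPIPE_SZ");
 *
 * The kernel may round the request up (see round_pipe_size()); on success
 * F_SETPIPE_SZ returns the capacity actually configured.
 */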

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);