// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size to which a non-root user is allowed to grow a pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size.
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
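
/*
 * Illustrative note (not part of the original file): these limits are
 * exposed as sysctls.  A minimal sketch of inspecting and raising the
 * per-pipe cap from a root shell, assuming a standard /proc mount:
 *
 *	# cat /proc/sys/fs/pipe-max-size
 *	1048576
 *	# echo 4194304 > /proc/sys/fs/pipe-max-size
 *
 * pipe_user_pages_soft and pipe_user_pages_hard are likewise reachable
 * via /proc/sys/fs/pipe-user-pages-soft and pipe-user-pages-hard.
 */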

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
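
/*
 * Illustrative sketch (not part of the original file) of the unmasked
 * index scheme described above.  Because head and tail wrap naturally,
 * the usual ring-buffer identities hold without a dead slot, e.g.:
 *
 *	unsigned int occupancy = head - tail;	// valid across wraparound
 *	bool empty = (head == tail);
 *	bool full  = (head - tail >= max_usage);
 *	struct pipe_buffer *buf = &pipe->bufs[tail & (ring_size - 1)];
 *
 * The power-of-two ring size is what makes the mask in the last line work.
 */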

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
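
/*
 * Descriptive note (added for exposition): pipe_double_lock() avoids
 * ABBA deadlocks by always taking the mutex of the pipe with the lower
 * address first, so two tasks locking the same pair in opposite order
 * still acquire the locks in one global order.  splice/tee-style callers
 * that hold two pipes at once are the expected users.
 */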

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
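
/*
 * Descriptive note (added for exposition): the ordering above is what
 * makes the wait atomic with dropping the lock.  prepare_to_wait()
 * queues the task on pipe->wait *before* pipe_unlock(), so a wakeup
 * that races in between the unlock and schedule() is not lost -- it
 * just makes schedule() return immediately.
 */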

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf:	the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}
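
/*
 * Descriptive note (added for exposition): the three ops tables above are
 * deliberately identical in content; mergeability is encoded purely in the
 * ops *pointer*.  pipe_buf_can_merge() compares against &anon_pipe_buf_ops,
 * so switching a buffer to anon_pipe_buf_nomerge_ops (or creating it with
 * packet_pipe_buf_ops) is enough to keep later writes from appending to it.
 */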

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->wait.lock);
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full)) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible(pipe->wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	}
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
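
/*
 * Illustrative sketch (not part of the original file): packet mode is
 * selected from userspace with O_DIRECT at pipe creation time, and makes
 * each write() a discrete record that a single read() will not cross,
 * assuming a kernel with pipe2() support:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd[2];
 *	pipe2(fd, O_DIRECT);			// packet mode
 *	write(fd[1], "ab", 2);
 *	write(fd[1], "cd", 2);
 *	char buf[16];
 *	ssize_t n = read(fd[0], buf, sizeof(buf));	// n == 2 ("ab"), not 4
 */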

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * Only wake up if the pipe started out empty, since
	 * otherwise there should be no readers waiting.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		wait_event_interruptible(pipe->wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
	}
out:
	__pipe_unlock(pipe);

	/*
	 * If we do do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			head = pipe->head;
			tail = pipe->tail;
			mask = pipe->ring_size - 1;

			while (tail != head) {
				count += pipe->bufs[tail & mask].len;
				tail++;
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
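
/*
 * Illustrative sketch (not part of the original file): FIONREAD is the
 * userspace view of the loop above -- it sums the ->len of every occupied
 * slot.  A minimal use, assuming fd[] came from pipe():
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int avail = 0;
 *	write(fd[1], "hello", 5);
 *	ioctl(fd[0], FIONREAD, &avail);	// avail is now 5
 */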

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	poll_wait(filp, &pipe->wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
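
/*
 * Illustrative sketch (not part of the original file): the mask computed
 * above is what a blocking poll() on the read side reacts to, assuming
 * fd[] came from pipe():
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd[0], .events = POLLIN };
 *	poll(&pfd, 1, -1);	// wakes once the pipe is non-empty
 *	// pfd.revents now has POLLIN set (or POLLHUP once all writers close)
 */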

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
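
/*
 * Descriptive note (added for exposition): account_pipe_buffers() returns
 * the user's new page total, which the callers then test against the soft
 * and hard limits.  E.g. growing a pipe from 16 to 256 slots adds 240 to
 * user->pipe_bufs and hands back the updated sum; shrinking passes
 * new < old and the signed delta decrements the counter.
 */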

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
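
/*
 * Illustrative sketch (not part of the original file): the classic use of
 * the syscalls above -- a parent/child pair connected by an anonymous pipe:
 *
 *	#include <unistd.h>
 *
 *	int fd[2];
 *	pipe(fd);			// fd[0] = read end, fd[1] = write end
 *	if (fork() == 0) {
 *		close(fd[1]);
 *		char c;
 *		read(fd[0], &c, 1);	// blocks until the parent writes
 *		_exit(0);
 *	}
 *	close(fd[0]);
 *	write(fd[1], "x", 1);
 */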

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
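
/*
 * Descriptive note (added for exposition): a couple of worked cases for
 * round_pipe_size(), assuming 4 KiB pages: size = 100 is below PAGE_SIZE
 * and becomes 4096; size = 6000 rounds up to the next power of two, 8192;
 * size = 3UL << 30 exceeds 1U << 31 and yields 0, which the caller below
 * rejects with -EINVAL.
 */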

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_slots, head, tail, mask, n;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->ring_size &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);

	if (nr_slots > pipe->ring_size &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing.  If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;
	wake_up_interruptible_all(&pipe->wait);
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
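
/*
 * Illustrative sketch (not part of the original file): pipe_fcntl() is
 * reached from the F_SETPIPE_SZ/F_GETPIPE_SZ fcntl() commands, assuming
 * fd[] came from pipe():
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	long sz = fcntl(fd[1], F_GETPIPE_SZ, 0);	// usually 65536
 *	fcntl(fd[1], F_SETPIPE_SZ, 1 << 20);		// ask for 1 MiB
 *	// the kernel may round the request up to a power-of-two page count
 */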

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);