// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2
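
/*
 * Illustrative sketch, not part of the kernel build: the jobserver-style
 * "pipe as semaphore" pattern the comment above refers to, seen from user
 * space. sem_acquire()/sem_release() are made-up names; read()/write() are
 * the real POSIX calls.
 */
#if 0 /* userspace example only, never built here */
#include <unistd.h>

/* Acquire a token: blocks until another process has released one. */
static char sem_acquire(int rfd)
{
	char token;

	while (read(rfd, &token, 1) != 1)
		;	/* sketch: real code must check errno for EINTR */
	return token;
}

/* Release a token: with >= 2 buffers this won't block on a non-full pipe. */
static void sem_release(int wfd, char token)
{
	while (write(wfd, &token, 1) != 1)
		;	/* sketch: real code must check errno for EINTR */
}
#endif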

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
static unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default buffers per pipe times the default
 * open-file limit.
 */
static unsigned long pipe_user_pages_hard;
static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
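
/*
 * Illustrative sketch, not kernel API: what the unmasked-index scheme above
 * means in practice. Because ring_size is a power of two and <= 2^31,
 * head - tail is the occupancy even after the unsigned counters wrap, and
 * slots are only masked at dereference time. example_* are made-up names.
 */
#if 0 /* example only, never built here */
static unsigned int example_occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;		/* wraps correctly; no dead slot */
}

static unsigned int example_slot(unsigned int index, unsigned int ring_size)
{
	return index & (ring_size - 1);	/* mask only when dereferencing */
}
#endif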

#define cmp_int(l, r)		((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
static int pipe_lock_cmp_fn(const struct lockdep_map *a,
			    const struct lockdep_map *b)
{
	return cmp_int((unsigned long) a, (unsigned long) b);
}
#endif

void pipe_lock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_lock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 > pipe2)
		swap(pipe1, pipe2);

	pipe_lock(pipe1);
	pipe_lock(pipe2);
}
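
/*
 * Illustrative sketch, not kernel code: why pipe_double_lock() sorts by
 * address. Two tasks locking the same pair in opposite argument order both
 * take min(pipe1, pipe2) first, so the classic ABBA deadlock cannot happen.
 * example_move_buffers() is a made-up caller.
 */
#if 0 /* example only, never built here */
static void example_move_buffers(struct pipe_inode_info *src,
				 struct pipe_inode_info *dst)
{
	pipe_double_lock(src, dst);	/* consistent global lock order */
	/* ... transfer buffers from src to dst ... */
	pipe_unlock(dst);
	pipe_unlock(src);
}
#endif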

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	it wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of
	 * this page is the only one holding a reference to it. Lock the
	 * page and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);
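
/*
 * Illustrative sketch, not kernel code: the usual calling pattern for
 * stealing, via the pipe_buf_try_steal() wrapper from <linux/pipe_fs_i.h>.
 * example_consume() is a made-up caller.
 */
#if 0 /* example only, never built here */
static void example_consume(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf)
{
	if (pipe_buf_try_steal(pipe, buf)) {
		/* we are now the sole owner of buf->page, and it is locked */
	} else {
		/* page is shared: fall back to copying its contents */
	}
}
#endif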

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(idx.head, idx.tail) || !writers;
}

static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf,
					    unsigned int tail)
{
	pipe_buf_release(pipe, buf);

	/*
	 * If the pipe has a watch_queue, we need the additional protection
	 * of the spinlock, because notifications get posted under this
	 * spinlock alone, without the mutex held.
	 */
	if (pipe_has_watch_queue(pipe)) {
		spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
		if (buf->flags & PIPE_BUF_FLAG_LOSS)
			pipe->note_loss = true;
#endif
		pipe->tail = ++tail;
		spin_unlock_irq(&pipe->rd_wait.lock);
		return tail;
	}

	/*
	 * Without a watch_queue, we can simply increment the tail
	 * without the spinlock - the mutex is enough.
	 */
	pipe->tail = ++tail;
	return tail;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool wake_writer = false, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	mutex_lock(&pipe->mutex);

	/*
	 * To avoid unnecessary wakeups, we only wake up writers if the pipe
	 * was full when we started reading and is no longer full afterwards.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				wake_writer |= pipe_full(head, tail, pipe->max_usage);
				tail = pipe_update_tail(pipe, buf, tail);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&pipe->mutex);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(wake_writer))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		wake_writer = false;
		wake_next_reader = true;
		mutex_lock(&pipe->mutex);
	}
	if (pipe_is_empty(pipe))
		wake_next_reader = false;
	mutex_unlock(&pipe->mutex);

	if (wake_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
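
/*
 * Illustrative sketch, not part of the kernel build: what O_DIRECT packet
 * mode looks like from user space. Each write() is one packet; a short
 * read() returns at most one packet and discards the unread tail (the
 * PIPE_BUF_FLAG_PACKET handling in pipe_read() above).
 */
#if 0 /* userspace example only, never built here */
#define _GNU_SOURCE	/* for O_DIRECT with pipe2() */
#include <fcntl.h>
#include <unistd.h>

static int example_packet_pipe(void)
{
	int fds[2];
	char buf[4];

	if (pipe2(fds, O_DIRECT) < 0)
		return -1;
	write(fds[1], "hello", 5);	/* one 5-byte packet */
	read(fds[0], buf, 4);		/* returns 4; the 'o' is discarded */
	return 0;
}
#endif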

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(idx.head, idx.tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/*
	 * Reject writing to watch queue pipes before the point where we lock
	 * the pipe.
	 * Otherwise, lockdep would be unhappy if the caller already has another
	 * pipe locked.
	 * If we had to support locking a normal pipe and a notification pipe at
	 * the same time, we could set up lockdep annotations for that, but
	 * since we don't actually need that, it's simpler to just bail here.
	 */
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	mutex_lock(&pipe->mutex);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer.  If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			pipe->head = head + 1;

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		mutex_unlock(&pipe->mutex);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		mutex_lock(&pipe->mutex);
		was_empty = pipe_is_empty(pipe);
		wake_next_writer = true;
	}
out:
	if (pipe_is_full(pipe))
		wake_next_writer = false;
	mutex_unlock(&pipe->mutex);

	/*
	 * If we do wake up a reader, we use a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&pipe->mutex);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;

		while (!pipe_empty(head, tail)) {
			count += pipe_buf(pipe, tail)->len;
			tail++;
		}
		mutex_unlock(&pipe->mutex);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		mutex_lock(&pipe->mutex);
		ret = watch_queue_set_size(pipe, arg);
		mutex_unlock(&pipe->mutex);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
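
/*
 * Illustrative sketch, not part of the kernel build: the FIONREAD path
 * above, driven from user space. It reports the sum of ->len over all
 * occupied ring slots, i.e. the bytes currently readable.
 */
#if 0 /* userspace example only, never built here */
#include <sys/ioctl.h>

static int example_bytes_pending(int pipe_rfd)
{
	int n = 0;

	if (ioctl(pipe_rfd, FIONREAD, &n) < 0)
		return -1;
	return n;
}
#endif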

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	union pipe_index idx;

	/* Epoll has some historical nasty semantics, this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	idx.head_tail = READ_ONCE(pipe->head_tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(idx.head, idx.tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_pipe != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(idx.head, idx.tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
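
/*
 * Illustrative sketch, not part of the kernel build: how user space
 * exercises pipe_poll() above. The EPOLLIN/EPOLLHUP/EPOLLERR bits set
 * there surface as POLLIN/POLLHUP/POLLERR in the revents field.
 */
#if 0 /* userspace example only, never built here */
#include <poll.h>

static int example_wait_readable(int pipe_rfd, int timeout_ms)
{
	struct pollfd pfd = { .fd = pipe_rfd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* >0: readable or HUP/ERR */
}
#endif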

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	mutex_lock(&pipe->mutex);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&pipe->mutex);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	mutex_lock(&pipe->mutex);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can only happen if on == true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&pipe->mutex);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		lock_set_cmp_fn(&pipe->mutex, pipe_lock_cmp_fn, NULL);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __ro_after_init;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	simple_inode_init_ts(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;
	f->f_pipe = 0;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[0]->f_pipe = 0;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	/*
	 * Disable permission and pre-content events, but enable legacy
	 * inotify events for legacy users.
	 */
	file_set_fsnotify_mode(res[0], FMODE_NONOTIFY_PERM);
	file_set_fsnotify_mode(res[1], FMODE_NONOTIFY_PERM);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
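
/*
 * Illustrative sketch, not part of the kernel build: the pipe2() calling
 * convention implemented above, from user space.
 */
#if 0 /* userspace example only, never built here */
#define _GNU_SOURCE	/* for pipe2() */
#include <fcntl.h>
#include <unistd.h>

static int example_make_pipe(int fds[2])
{
	/* On success fds[0] is the read end and fds[1] the write end. */
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
#endif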

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_pipe = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	mutex_lock(&pipe->mutex);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_pipe = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	mutex_unlock(&pipe->mutex);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	mutex_unlock(&pipe->mutex);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
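
/*
 * Worked examples for round_pipe_size(), assuming 4 KiB pages:
 *
 *	round_pipe_size(1)              == 4096		(clamped up to PAGE_SIZE)
 *	round_pipe_size(4096)           == 4096
 *	round_pipe_size(4097)           == 8192		(next power of two)
 *	round_pipe_size(1U << 31)       == 1U << 31
 *	round_pipe_size((1U << 31) + 1) == 0		(too large)
 */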

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->mutex);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&pipe->mutex);
	return ret;
}
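
/*
 * Illustrative sketch, not part of the kernel build: resizing a pipe from
 * user space via the fcntl() path above. The kernel rounds the request up
 * with round_pipe_size(), and F_SETPIPE_SZ/F_GETPIPE_SZ return the size
 * actually in effect.
 */
#if 0 /* userspace example only, never built here */
#define _GNU_SOURCE	/* for F_SETPIPE_SZ/F_GETPIPE_SZ */
#include <fcntl.h>

static long example_grow_pipe(int pipe_fd)
{
	if (fcntl(pipe_fd, F_SETPIPE_SZ, 1024 * 1024) < 0)
		return -1;			/* e.g. EPERM over limits */
	return fcntl(pipe_fd, F_GETPIPE_SZ);	/* rounded size in bytes */
}
#endif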

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole file system mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}

static int proc_dopipe_max_size(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}

static const struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};
#endif

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}

fs_initcall(init_pipe_fs);