
/*
 *  linux/fs/pipe.c
 */
/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers; with fewer than two, a write to a non-empty pipe may block even if
 * the pipe is not full, e.g. when several writers queue tokens on the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * empty.
 */
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
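As a user-space illustration of these limits (a minimal sketch, not part of
pipe.c; error handling trimmed), F_GETPIPE_SZ/F_SETPIPE_SZ operate on the
capacity that pipe_set_size() manages below, and growing past
/proc/sys/fs/pipe-max-size fails with EPERM for unprivileged users:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];

		if (pipe(fds) < 0)
			return 1;
		/* current capacity in bytes (a multiple of the page size) */
		printf("capacity: %ld\n", (long)fcntl(fds[1], F_GETPIPE_SZ));
		/* request 1 MiB; the kernel rounds to a power-of-two slot count */
		if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)
			perror("F_SETPIPE_SZ");	/* EPERM above pipe-max-size */
		printf("capacity now: %ld\n", (long)fcntl(fds[1], F_GETPIPE_SZ));
		close(fds[0]);
		close(fds[1]);
		return 0;
	}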
void pipe_lock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_lock(&pipe->mutex);
}

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
static struct page *anon_pipe_get_page(struct pipe_inode_info *pipe)
{
	for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
		if (pipe->tmp_page[i]) {
			/* reuse a page cached by a previous release */
			struct page *page = pipe->tmp_page[i];
			pipe->tmp_page[i] = NULL;
			return page;
		}
	}
	/* no cached page: fall back to allocating a fresh one */
	...
}
static void anon_pipe_put_page(struct pipe_inode_info *pipe,
			       struct page *page)
{
	...
	for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
		if (!pipe->tmp_page[i]) {
			/* cache the page for reuse by a later write */
			pipe->tmp_page[i] = page;
			return;
		}
	}
	...
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	...
	anon_pipe_put_page(pipe, page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
	...
/*
 * @pipe: the pipe that the buffer belongs to
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
	...

/*
 * @pipe: the pipe that the buffer belongs to
 * ... takes a reference so the buffer can be duplicated from one
 * pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
	...

/*
 * @pipe: the pipe that the buffer belongs to
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
	...
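These generic helpers are meant to be plugged into a pipe_buf_operations
table. A minimal sketch of such a table (an illustration only, not a struct
defined in this file; the member names are assumed to follow
include/linux/pipe_fs_i.h):

	/* hypothetical ops table wiring up the generic helpers */
	static const struct pipe_buf_operations example_pipe_buf_ops = {
		.release	= generic_pipe_buf_release,
		.try_steal	= generic_pipe_buf_try_steal,
		.get		= generic_pipe_buf_get,
	};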
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(idx.head, idx.tail) || !writers;
}
static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf,
					    unsigned int tail)
{
	pipe_buf_release(pipe, buf);

	/*
	 * If the pipe has a watch_queue, we need additional protection
	 * ...
	 */
	if (pipe_has_watch_queue(pipe)) {
		spin_lock_irq(&pipe->rd_wait.lock);
		...
			pipe->note_loss = true;
		...
		pipe->tail = ++tail;
		spin_unlock_irq(&pipe->rd_wait.lock);
		return tail;
	}
	...
	pipe->tail = ++tail;
	return tail;
}
/* excerpted from anon_pipe_read(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = filp->private_data;
	...
	mutex_lock(&pipe->mutex);

	/*
	 * We only wake up writers if the pipe was full when we started reading
	 * ...
	 */
	for (;;) {
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;

		if (pipe->note_loss) {
			...
			pipe->note_loss = false;
		}
		...
			struct pipe_buffer *buf = pipe_buf(pipe, tail);
			...
			error = pipe_buf_confirm(pipe, buf);
			...
				wake_writer |= pipe_full(head, tail, pipe->max_usage);
				tail = pipe_update_tail(pipe, buf, tail);
			...
		if (!pipe->writers)
			break;
		...
		mutex_unlock(&pipe->mutex);
		...
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;
		mutex_lock(&pipe->mutex);
	}
	if (pipe_is_empty(pipe))
		...
	mutex_unlock(&pipe->mutex);

	if (wake_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	...
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
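The pipe->writers bookkeeping above is what user space observes as end-of-file:
read() on the reading end returns 0 once every writer has closed its end. A
minimal sketch (hypothetical example, not from pipe.c):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		char buf[16];

		if (pipe(fds) < 0)
			return 1;
		write(fds[1], "hi", 2);
		close(fds[1]);					/* drop the last writer */
		printf("read: %zd\n", read(fds[0], buf, sizeof(buf)));	/* 2 */
		printf("read: %zd\n", read(fds[0], buf, sizeof(buf)));	/* 0 = EOF */
		close(fds[0]);
		return 0;
	}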
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(idx.head, idx.tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}
/* excerpted from anon_pipe_write(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = filp->private_data;
	...
	/*
	 * Reject writing to watch queue pipes before the point where we lock
	 * the pipe.
	 * Otherwise, lockdep would be unhappy if the caller already has another
	 * pipe locked.
	 * If we had to support locking a normal pipe and a notification pipe at
	 * the same time, we could set up lockdep annotations for that, but
	 * since we don't actually need that, it's simpler to just bodge this.
	 */
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;

	mutex_lock(&pipe->mutex);

	if (!pipe->readers) {
		/* no readers left: raise SIGPIPE and fail with -EPIPE */
		...
	}

	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	...
		/* try to merge the data into the last (partially filled) buffer */
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		...
		ret = pipe_buf_confirm(pipe, buf);
		...
	for (;;) {
		if (!pipe->readers) {
			...
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			...
			page = anon_pipe_get_page(pipe);
			...
				anon_pipe_put_page(pipe, page);
			...
			pipe->head = head + 1;
			buf = pipe_buf(pipe, head);
			...
		}
		...
		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * become empty while we dropped the lock.
		 */
		mutex_unlock(&pipe->mutex);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		mutex_lock(&pipe->mutex);
		was_empty = pipe_is_empty(pipe);
	}
	...
	if (pipe_is_full(pipe))
		...
	mutex_unlock(&pipe->mutex);

	/*
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	...
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
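Rather than sleeping on wr_wait as above, a writer that opened the pipe with
O_NONBLOCK gets EAGAIN once the ring is full. A small user-space sketch
(hypothetical example, not from pipe.c):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		char byte = 0;
		long filled = 0;

		if (pipe2(fds, O_NONBLOCK) < 0)
			return 1;
		/* fill the ring one byte at a time until the kernel says stop */
		while (write(fds[1], &byte, 1) == 1)
			filled++;
		if (errno == EAGAIN)
			printf("pipe full after %ld bytes\n", filled);
		close(fds[0]);
		close(fds[1]);
		return 0;
	}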
/* excerpted from pipe_ioctl(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = filp->private_data;
	...
	case FIONREAD:
		mutex_lock(&pipe->mutex);
		...
		head = pipe->head;
		tail = pipe->tail;
		while (!pipe_empty(head, tail)) {
			count += pipe_buf(pipe, tail)->len;
			tail++;
		}
		mutex_unlock(&pipe->mutex);
		return put_user(count, (int __user *)arg);
	...
	case IOC_WATCH_QUEUE_SET_SIZE:
		...
		mutex_lock(&pipe->mutex);
		ret = watch_queue_set_size(pipe, arg);
		mutex_unlock(&pipe->mutex);
		...
	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
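From user space, the FIONREAD branch above is what reports how many bytes are
queued in the pipe. A small sketch (hypothetical example, not from pipe.c):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2], queued = 0;

		if (pipe(fds) < 0)
			return 1;
		write(fds[1], "hello", 5);
		/* ask the kernel how much is buffered but not yet read */
		if (ioctl(fds[0], FIONREAD, &queued) == 0)
			printf("%d bytes queued\n", queued);	/* prints 5 */
		close(fds[0]);
		close(fds[1]);
		return 0;
	}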
/* excerpted from pipe_poll(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = filp->private_data;
	...
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 * ...
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);
	...
	idx.head_tail = READ_ONCE(pipe->head_tail);
	...
		if (!pipe->writers && filp->f_pipe != pipe->w_counter)
			mask |= EPOLLHUP;
	...
		if (!pipe_full(idx.head, idx.tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		...
		if (!pipe->readers)
			mask |= EPOLLERR;
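The EPOLLHUP/EPOLLERR cases above are what poll(2) reports once the other end
of the pipe has gone away. A brief sketch (hypothetical example, not from
pipe.c):

	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		struct pollfd pfd;

		if (pipe(fds) < 0)
			return 1;
		close(fds[1]);			/* no writers remain */

		pfd.fd = fds[0];
		pfd.events = POLLIN;
		poll(&pfd, 1, 0);
		/* with all writers gone, the read end reports POLLHUP */
		printf("revents: %s\n", (pfd.revents & POLLHUP) ? "POLLHUP" : "other");
		close(fds[0]);
		return 0;
	}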
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	...
	if (!--pipe->files) {
		...
	}
	...
		free_pipe_info(pipe);
}

/* excerpted from pipe_release(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = file->private_data;

	mutex_lock(&pipe->mutex);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* was that the last reader or writer, but not yet both sides? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&pipe->mutex);

	put_pipe_info(inode, pipe);
/* excerpted from pipe_fasync(); elided code is marked with "..." */
	struct pipe_inode_info *pipe = filp->private_data;
	...
	mutex_lock(&pipe->mutex);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* undo the read-side registration on failure */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&pipe->mutex);
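The fasync_readers/fasync_writers lists above back the SIGIO delivery that user
space requests with O_ASYNC. A minimal sketch of arming it on the read end
(hypothetical example, not from pipe.c; the handler is deliberately trivial):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>

	static void on_sigio(int sig) { (void)sig; /* data arrived */ }

	int main(void)
	{
		int fds[2];

		if (pipe(fds) < 0)
			return 1;
		signal(SIGIO, on_sigio);
		/* direct SIGIO for this fd to us, then enable async notification */
		fcntl(fds[0], F_SETOWN, getpid());
		fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_ASYNC);

		write(fds[1], "x", 1);	/* triggers kill_fasync() -> SIGIO */
		close(fds[0]);
		close(fds[1]);
		return 0;
	}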
/* excerpted from alloc_pipe_info(); elided code is marked with "..." */
	struct pipe_inode_info *pipe;
	...
	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		...
	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		lock_set_cmp_fn(&pipe->mutex, pipe_lock_cmp_fn, NULL);
		return pipe;
	}
	...
	kfree(pipe);
	...
void free_pipe_info(struct pipe_inode_info *pipe)
{
	...
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
	...
	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
	for (i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
		if (pipe->tmp_page[i])
			__free_page(pipe->tmp_page[i]);
	}
	kfree(pipe->bufs);
	kfree(pipe);
}

/* excerpted from pipefs_dname(): the name shown in /proc/<pid>/fd */
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);

/* excerpted from get_pipe_inode(); elided code is marked with "..." */
	struct pipe_inode_info *pipe;
	...
	pipe = alloc_pipe_info();
	if (!pipe)
		...
	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
/* in create_pipe_files(): */
	/* pipe groks IOCB_NOWAIT */
...

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
...

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
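In user space the pipe() and pipe2() system calls defined here are the entry
points; pipe2() additionally takes flags. A short sketch (hypothetical example,
not from pipe.c):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];

		/* close-on-exec and non-blocking ends in one call, no fcntl() races */
		if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0) {
			perror("pipe2");
			return 1;
		}
		printf("read end %d, write end %d\n", fds[0], fds[1]);
		close(fds[0]);
		close(fds[1]);
		return 0;
	}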
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * ...
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}
/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 * ...
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}
/* excerpted from fifo_open(); elided code is marked with "..." */
	struct pipe_inode_info *pipe;
	...
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		...
	} else {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		...
			/* another fifo_open() beat us to it: use its pipe */
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		...
			inode->i_pipe = pipe;
		...
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	mutex_lock(&pipe->mutex);
	...
	case FMODE_READ:
		...
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				...
				filp->f_pipe = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
		...
		/* O_NONBLOCK open for writing with no readers fails with ENXIO */
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
		...
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;
	...
	mutex_unlock(&pipe->mutex);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	...
err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	...
err:
	mutex_unlock(&pipe->mutex);
	put_pipe_info(inode, pipe);
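The ENXIO case in the write-only branch is visible from user space when a FIFO
is opened for writing with O_NONBLOCK and nobody has it open for reading. A
short sketch (hypothetical example, not from pipe.c; /tmp/demo.fifo is an
arbitrary path):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd;

		mkfifo("/tmp/demo.fifo", 0600);
		/* no reader yet: POSIX says this fails immediately with ENXIO */
		fd = open("/tmp/demo.fifo", O_WRONLY | O_NONBLOCK);
		if (fd < 0 && errno == ENXIO)
			printf("no reader on the FIFO yet\n");
		else if (fd >= 0)
			close(fd);
		unlink("/tmp/demo.fifo");
		return 0;
	}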
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
/* in round_pipe_size(): */
	...
	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;
	...
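A sketch of what that rounding amounts to (an illustration under the stated
assumptions, not the verbatim helper; the real round_pipe_size() also rejects
absurdly large requests):

	/* hypothetical stand-alone rendering of the size rounding */
	static unsigned int example_round_pipe_size(unsigned int size)
	{
		/* Minimum pipe size, as required by POSIX */
		if (size < PAGE_SIZE)
			return PAGE_SIZE;
		/* keep the ring a power-of-2 number of pages */
		return roundup_pow_of_two(size);
	}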
/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy fits within the new number of slots (otherwise -EBUSY is returned).
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	...
	/* nr_slots larger than limits of pipe->{head,tail} */
	...
	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	...
		spin_unlock_irq(&pipe->rd_wait.lock);
	...
	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	...
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
	...
			unsigned int tsize = pipe->ring_size - t;
			...
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
	...
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	...
	if (pipe_has_watch_queue(pipe))
		return -EBUSY;
	...
	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits.
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			...)	/* per-user soft/hard buffer limits */
		...

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		...

	return pipe->max_usage * PAGE_SIZE;
	...
	/* on failure, give back the accounted buffers */
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
/*
 * ... checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;
	...
	if (!pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

/* excerpted from pipe_fcntl(); elided code is marked with "..." */
	struct pipe_inode_info *pipe;
	...
	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->mutex);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	...
	}

	mutex_unlock(&pipe->mutex);
/*
 * ...
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
/* sysctl table exposing the pipe limits under /proc/sys/fs/ */
	{
		.procname	= "pipe-max-size",
		...
	},
	{
		.procname	= "pipe-user-pages-hard",
		...
	},
	{
		.procname	= "pipe-user-pages-soft",
		...
	},
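These knobs appear to user space as ordinary files under /proc/sys/fs. A tiny
reader (hypothetical example, not from pipe.c):

	#include <stdio.h>

	int main(void)
	{
		static const char *knobs[] = {
			"/proc/sys/fs/pipe-max-size",
			"/proc/sys/fs/pipe-user-pages-hard",
			"/proc/sys/fs/pipe-user-pages-soft",
		};
		char line[64];

		for (int i = 0; i < 3; i++) {
			FILE *f = fopen(knobs[i], "r");

			if (f && fgets(line, sizeof(line), f))
				printf("%s = %s", knobs[i], line);
			if (f)
				fclose(f);
		}
		return 0;
	}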