Lines Matching +full:wakeup +full:-capable (fs/pipe.c)

1 // SPDX-License-Identifier: GPL-2.0
40 * than two, then a write to a non-empty pipe may block even if the pipe is not
46 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
52 * The max size that a non-root user is allowed to grow the pipe. Can
53 * be set by root in /proc/sys/fs/pipe-max-size
68 * -- David Howells 2019-09-23.
71 * -- Julian Bradfield 1999-06-07.
74 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
77 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
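The sizing comments above (the two-buffer minimum and the /proc/sys/fs/pipe-max-size cap) correspond to the F_GETPIPE_SZ / F_SETPIPE_SZ fcntls implemented further down in pipe_set_size() and pipe_fcntl(). A minimal user-space sketch, assuming a glibc build with _GNU_SOURCE; the one-page request is purely illustrative and exercises the "shrink at your own risk" case:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	printf("default capacity: %d bytes\n", fcntl(fds[0], F_GETPIPE_SZ));
	/* Shrinking to a single page is allowed, at the risk described above. */
	if (fcntl(fds[0], F_SETPIPE_SZ, 4096) < 0)
		perror("F_SETPIPE_SZ");
	printf("new capacity: %d bytes\n", fcntl(fds[0], F_GETPIPE_SZ));
	close(fds[0]);
	close(fds[1]);
	return 0;
}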
90 if (pipe->files) in pipe_lock()
91 mutex_lock(&pipe->mutex); in pipe_lock()
97 if (pipe->files) in pipe_unlock()
98 mutex_unlock(&pipe->mutex); in pipe_unlock()
116 for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) { in anon_pipe_get_page()
117 if (pipe->tmp_page[i]) { in anon_pipe_get_page()
118 struct page *page = pipe->tmp_page[i]; in anon_pipe_get_page()
119 pipe->tmp_page[i] = NULL; in anon_pipe_get_page()
131 for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) { in anon_pipe_put_page()
132 if (!pipe->tmp_page[i]) { in anon_pipe_put_page()
133 pipe->tmp_page[i] = page; in anon_pipe_put_page()
145 struct page *page = buf->page; in anon_pipe_buf_release()
153 struct page *page = buf->page; in anon_pipe_buf_try_steal()
163 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
177 struct page *page = buf->page; in generic_pipe_buf_try_steal()
193 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
204 return try_get_page(buf->page); in generic_pipe_buf_get()
209 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
219 put_page(buf->page); in generic_pipe_buf_release()
229 /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
232 union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) }; in pipe_readable()
233 unsigned int writers = READ_ONCE(pipe->writers); in pipe_readable()
250 spin_lock_irq(&pipe->rd_wait.lock); in pipe_update_tail()
252 if (buf->flags & PIPE_BUF_FLAG_LOSS) in pipe_update_tail()
253 pipe->note_loss = true; in pipe_update_tail()
255 pipe->tail = ++tail; in pipe_update_tail()
256 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_update_tail()
262 * without the spinlock - the mutex is enough. in pipe_update_tail()
264 pipe->tail = ++tail; in pipe_update_tail()
272 struct file *filp = iocb->ki_filp; in anon_pipe_read()
273 struct pipe_inode_info *pipe = filp->private_data; in anon_pipe_read()
282 mutex_lock(&pipe->mutex); in anon_pipe_read()
288 * But when we do wake up writers, we do so using a sync wakeup in anon_pipe_read()
293 /* Read ->head with a barrier vs post_one_notification() */ in anon_pipe_read()
294 unsigned int head = smp_load_acquire(&pipe->head); in anon_pipe_read()
295 unsigned int tail = pipe->tail; in anon_pipe_read()
298 if (pipe->note_loss) { in anon_pipe_read()
303 ret = -ENOBUFS; in anon_pipe_read()
312 ret = -EFAULT; in anon_pipe_read()
316 total_len -= sizeof(n); in anon_pipe_read()
317 pipe->note_loss = false; in anon_pipe_read()
323 size_t chars = buf->len; in anon_pipe_read()
328 if (buf->flags & PIPE_BUF_FLAG_WHOLE) { in anon_pipe_read()
330 ret = -ENOBUFS; in anon_pipe_read()
343 written = copy_page_to_iter(buf->page, buf->offset, chars, to); in anon_pipe_read()
346 ret = -EFAULT; in anon_pipe_read()
350 buf->offset += chars; in anon_pipe_read()
351 buf->len -= chars; in anon_pipe_read()
354 if (buf->flags & PIPE_BUF_FLAG_PACKET) { in anon_pipe_read()
356 buf->len = 0; in anon_pipe_read()
359 if (!buf->len) { in anon_pipe_read()
360 wake_writer |= pipe_full(head, tail, pipe->max_usage); in anon_pipe_read()
363 total_len -= chars; in anon_pipe_read()
370 if (!pipe->writers) in anon_pipe_read()
374 if ((filp->f_flags & O_NONBLOCK) || in anon_pipe_read()
375 (iocb->ki_flags & IOCB_NOWAIT)) { in anon_pipe_read()
376 ret = -EAGAIN; in anon_pipe_read()
379 mutex_unlock(&pipe->mutex); in anon_pipe_read()
384 * just return directly with -ERESTARTSYS if we're interrupted, in anon_pipe_read()
388 if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0) in anon_pipe_read()
389 return -ERESTARTSYS; in anon_pipe_read()
392 mutex_lock(&pipe->mutex); in anon_pipe_read()
396 mutex_unlock(&pipe->mutex); in anon_pipe_read()
399 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); in anon_pipe_read()
401 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in anon_pipe_read()
402 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); in anon_pipe_read()
411 file_accessed(iocb->ki_filp); in fifo_pipe_read()
417 return (file->f_flags & O_DIRECT) != 0; in is_packetized()
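is_packetized() reflects the O_DIRECT "packet mode" that the PIPE_BUF_FLAG_PACKET handling in the read and write paths implements: each write becomes one packet and a read returns at most one packet. An illustrative user-space sketch (payloads and buffer size are assumptions, not taken from this file):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[64];

	if (pipe2(fds, O_DIRECT))
		return 1;
	write(fds[1], "first", 5);
	write(fds[1], "second", 6);
	/* Packet mode: this read returns only the 5-byte "first" packet,
	 * even though more data is queued behind it. */
	printf("read %zd bytes\n", read(fds[0], buf, sizeof(buf)));
	close(fds[0]);
	close(fds[1]);
	return 0;
}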
420 /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
423 union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) }; in pipe_writable()
424 unsigned int max_usage = READ_ONCE(pipe->max_usage); in pipe_writable()
427 !READ_ONCE(pipe->readers); in pipe_writable()
433 struct file *filp = iocb->ki_filp; in anon_pipe_write()
434 struct pipe_inode_info *pipe = filp->private_data; in anon_pipe_write()
452 return -EXDEV; in anon_pipe_write()
458 mutex_lock(&pipe->mutex); in anon_pipe_write()
460 if (!pipe->readers) { in anon_pipe_write()
461 if ((iocb->ki_flags & IOCB_NOSIGNAL) == 0) in anon_pipe_write()
463 ret = -EPIPE; in anon_pipe_write()
472 * page-aligns the rest of the writes for large writes in anon_pipe_write()
475 head = pipe->head; in anon_pipe_write()
476 was_empty = pipe_empty(head, pipe->tail); in anon_pipe_write()
477 chars = total_len & (PAGE_SIZE-1); in anon_pipe_write()
479 struct pipe_buffer *buf = pipe_buf(pipe, head - 1); in anon_pipe_write()
480 int offset = buf->offset + buf->len; in anon_pipe_write()
482 if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) && in anon_pipe_write()
488 ret = copy_page_from_iter(buf->page, offset, chars, from); in anon_pipe_write()
490 ret = -EFAULT; in anon_pipe_write()
494 buf->len += ret; in anon_pipe_write()
501 if (!pipe->readers) { in anon_pipe_write()
502 if ((iocb->ki_flags & IOCB_NOSIGNAL) == 0) in anon_pipe_write()
505 ret = -EPIPE; in anon_pipe_write()
509 head = pipe->head; in anon_pipe_write()
510 if (!pipe_full(head, pipe->tail, pipe->max_usage)) { in anon_pipe_write()
518 ret = -ENOMEM; in anon_pipe_write()
526 ret = -EFAULT; in anon_pipe_write()
530 pipe->head = head + 1; in anon_pipe_write()
533 buf->page = page; in anon_pipe_write()
534 buf->ops = &anon_pipe_buf_ops; in anon_pipe_write()
535 buf->offset = 0; in anon_pipe_write()
537 buf->flags = PIPE_BUF_FLAG_PACKET; in anon_pipe_write()
539 buf->flags = PIPE_BUF_FLAG_CAN_MERGE; in anon_pipe_write()
541 buf->len = copied; in anon_pipe_write()
551 if ((filp->f_flags & O_NONBLOCK) || in anon_pipe_write()
552 (iocb->ki_flags & IOCB_NOWAIT)) { in anon_pipe_write()
554 ret = -EAGAIN; in anon_pipe_write()
559 ret = -ERESTARTSYS; in anon_pipe_write()
566 * after waiting we need to re-check whether the pipe in anon_pipe_write()
569 mutex_unlock(&pipe->mutex); in anon_pipe_write()
571 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in anon_pipe_write()
572 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in anon_pipe_write()
573 wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe)); in anon_pipe_write()
574 mutex_lock(&pipe->mutex); in anon_pipe_write()
581 mutex_unlock(&pipe->mutex); in anon_pipe_write()
584 * If we do do a wakeup event, we do a 'sync' wakeup, because we in anon_pipe_write()
592 * Epoll nonsensically wants a wakeup whether the pipe in anon_pipe_write()
595 if (was_empty || pipe->poll_usage) in anon_pipe_write()
596 wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); in anon_pipe_write()
597 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in anon_pipe_write()
599 wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); in anon_pipe_write()
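The !pipe->readers checks in anon_pipe_write() above are the kernel side of the documented pipe(7) behaviour: writing with no reader left raises SIGPIPE, or fails with EPIPE if the signal is ignored. A small illustrative sketch:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	close(fds[0]);				/* no readers remain */
	signal(SIGPIPE, SIG_IGN);		/* observe EPIPE instead of dying */
	if (write(fds[1], "x", 1) < 0 && errno == EPIPE)
		fprintf(stderr, "write: EPIPE\n");
	close(fds[1]);
	return 0;
}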
608 struct file *filp = iocb->ki_filp; in fifo_pipe_write()
609 if (sb_start_write_trylock(file_inode(filp)->i_sb)) { in fifo_pipe_write()
613 sb_end_write(file_inode(filp)->i_sb); in fifo_pipe_write()
621 struct pipe_inode_info *pipe = filp->private_data; in pipe_ioctl()
626 mutex_lock(&pipe->mutex); in pipe_ioctl()
628 head = pipe->head; in pipe_ioctl()
629 tail = pipe->tail; in pipe_ioctl()
632 count += pipe_buf(pipe, tail)->len; in pipe_ioctl()
635 mutex_unlock(&pipe->mutex); in pipe_ioctl()
642 mutex_lock(&pipe->mutex); in pipe_ioctl()
644 mutex_unlock(&pipe->mutex); in pipe_ioctl()
654 return -ENOIOCTLCMD; in pipe_ioctl()
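The loop over pipe_buf(pipe, tail)->len in pipe_ioctl() above services FIONREAD, which lets user space ask how many unread bytes are queued. An illustrative sketch (the 5-byte payload is an assumption):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fds[2], unread = 0;

	if (pipe(fds))
		return 1;
	write(fds[1], "hello", 5);
	ioctl(fds[0], FIONREAD, &unread);
	printf("%d bytes queued\n", unread);	/* expected: 5 */
	close(fds[0]);
	close(fds[1]);
	return 0;
}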
658 /* No kernel lock held - fine */
663 struct pipe_inode_info *pipe = filp->private_data; in pipe_poll()
667 WRITE_ONCE(pipe->poll_usage, true); in pipe_poll()
670 * Reading pipe state only -- no need for acquiring the semaphore. in pipe_poll()
675 if (filp->f_mode & FMODE_READ) in pipe_poll()
676 poll_wait(filp, &pipe->rd_wait, wait); in pipe_poll()
677 if (filp->f_mode & FMODE_WRITE) in pipe_poll()
678 poll_wait(filp, &pipe->wr_wait, wait); in pipe_poll()
685 idx.head_tail = READ_ONCE(pipe->head_tail); in pipe_poll()
688 if (filp->f_mode & FMODE_READ) { in pipe_poll()
691 if (!pipe->writers && filp->f_pipe != pipe->w_counter) in pipe_poll()
695 if (filp->f_mode & FMODE_WRITE) { in pipe_poll()
696 if (!pipe_full(idx.head, idx.tail, pipe->max_usage)) in pipe_poll()
702 if (!pipe->readers) in pipe_poll()
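pipe_poll() above derives readability and writability from a single READ_ONCE() snapshot of head_tail and reports hangup once the counterpart end is gone. From user space that shows up as POLLIN together with POLLHUP after the last writer closes while data is still queued; a sketch with an illustrative one-byte payload:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	write(fds[1], "x", 1);
	close(fds[1]);			/* last writer goes away */

	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
	poll(&pfd, 1, 0);
	printf("POLLIN=%d POLLHUP=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLHUP));
	close(fds[0]);
	return 0;
}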
713 spin_lock(&inode->i_lock); in put_pipe_info()
714 if (!--pipe->files) { in put_pipe_info()
715 inode->i_pipe = NULL; in put_pipe_info()
718 spin_unlock(&inode->i_lock); in put_pipe_info()
727 struct pipe_inode_info *pipe = file->private_data; in pipe_release()
729 mutex_lock(&pipe->mutex); in pipe_release()
730 if (file->f_mode & FMODE_READ) in pipe_release()
731 pipe->readers--; in pipe_release()
732 if (file->f_mode & FMODE_WRITE) in pipe_release()
733 pipe->writers--; in pipe_release()
736 if (!pipe->readers != !pipe->writers) { in pipe_release()
737 wake_up_interruptible_all(&pipe->rd_wait); in pipe_release()
738 wake_up_interruptible_all(&pipe->wr_wait); in pipe_release()
739 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); in pipe_release()
740 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); in pipe_release()
742 mutex_unlock(&pipe->mutex); in pipe_release()
751 struct pipe_inode_info *pipe = filp->private_data; in pipe_fasync()
754 mutex_lock(&pipe->mutex); in pipe_fasync()
755 if (filp->f_mode & FMODE_READ) in pipe_fasync()
756 retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); in pipe_fasync()
757 if ((filp->f_mode & FMODE_WRITE) && retval >= 0) { in pipe_fasync()
758 retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); in pipe_fasync()
759 if (retval < 0 && (filp->f_mode & FMODE_READ)) in pipe_fasync()
761 fasync_helper(-1, filp, 0, &pipe->fasync_readers); in pipe_fasync()
763 mutex_unlock(&pipe->mutex); in pipe_fasync()
770 return atomic_long_add_return(new - old, &user->pipe_bufs); in account_pipe_buffers()
789 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); in pipe_is_unprivileged_user()
804 if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE)) in alloc_pipe_info()
817 pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), in alloc_pipe_info()
820 if (pipe->bufs) { in alloc_pipe_info()
821 init_waitqueue_head(&pipe->rd_wait); in alloc_pipe_info()
822 init_waitqueue_head(&pipe->wr_wait); in alloc_pipe_info()
823 pipe->r_counter = pipe->w_counter = 1; in alloc_pipe_info()
824 pipe->max_usage = pipe_bufs; in alloc_pipe_info()
825 pipe->ring_size = pipe_bufs; in alloc_pipe_info()
826 pipe->nr_accounted = pipe_bufs; in alloc_pipe_info()
827 pipe->user = user; in alloc_pipe_info()
828 mutex_init(&pipe->mutex); in alloc_pipe_info()
829 lock_set_cmp_fn(&pipe->mutex, pipe_lock_cmp_fn, NULL); in alloc_pipe_info()
846 if (pipe->watch_queue) in free_pipe_info()
847 watch_queue_clear(pipe->watch_queue); in free_pipe_info()
850 (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); in free_pipe_info()
851 free_uid(pipe->user); in free_pipe_info()
852 for (i = 0; i < pipe->ring_size; i++) { in free_pipe_info()
853 struct pipe_buffer *buf = pipe->bufs + i; in free_pipe_info()
854 if (buf->ops) in free_pipe_info()
858 if (pipe->watch_queue) in free_pipe_info()
859 put_watch_queue(pipe->watch_queue); in free_pipe_info()
861 for (i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) { in free_pipe_info()
862 if (pipe->tmp_page[i]) in free_pipe_info()
863 __free_page(pipe->tmp_page[i]); in free_pipe_info()
865 kfree(pipe->bufs); in free_pipe_info()
877 d_inode(dentry)->i_ino); in pipefs_dname()
888 struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb); in get_pipe_inode()
894 inode->i_ino = get_next_ino(); in get_pipe_inode()
900 inode->i_pipe = pipe; in get_pipe_inode()
901 pipe->files = 2; in get_pipe_inode()
902 pipe->readers = pipe->writers = 1; in get_pipe_inode()
903 inode->i_fop = &pipeanon_fops; in get_pipe_inode()
911 inode->i_state = I_DIRTY; in get_pipe_inode()
912 inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; in get_pipe_inode()
913 inode->i_uid = current_fsuid(); in get_pipe_inode()
914 inode->i_gid = current_fsgid(); in get_pipe_inode()
933 return -ENFILE; in create_pipe_files()
936 error = watch_queue_init(inode->i_pipe); in create_pipe_files()
938 free_pipe_info(inode->i_pipe); in create_pipe_files()
948 free_pipe_info(inode->i_pipe); in create_pipe_files()
953 f->private_data = inode->i_pipe; in create_pipe_files()
954 f->f_pipe = 0; in create_pipe_files()
959 put_pipe_info(inode, inode->i_pipe); in create_pipe_files()
963 res[0]->private_data = inode->i_pipe; in create_pipe_files()
964 res[0]->f_pipe = 0; in create_pipe_files()
970 res[0]->f_mode |= FMODE_NOWAIT; in create_pipe_files()
971 res[1]->f_mode |= FMODE_NOWAIT; in create_pipe_files()
974 * Disable permission and pre-content events, but enable legacy in create_pipe_files()
988 return -EINVAL; in __do_pipe_flags()
1045 error = -EFAULT; in do_pipe2()
1076 wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe)); in pipe_wait_readable()
1083 wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe)); in pipe_wait_writable()
1088 * This depends on both the wait (here) and the wakeup (wake_up_partner)
1089 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
1105 prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE); in wait_for_partner()
1108 finish_wait(&pipe->rd_wait, &rdwait); in wait_for_partner()
1113 return cur == *cnt ? -ERESTARTSYS : 0; in wait_for_partner()
1118 wake_up_interruptible_all(&pipe->rd_wait); in wake_up_partner()
1123 bool is_pipe = inode->i_fop == &pipeanon_fops; in fifo_open()
1127 filp->f_pipe = 0; in fifo_open()
1129 spin_lock(&inode->i_lock); in fifo_open()
1130 if (inode->i_pipe) { in fifo_open()
1131 pipe = inode->i_pipe; in fifo_open()
1132 pipe->files++; in fifo_open()
1133 spin_unlock(&inode->i_lock); in fifo_open()
1135 spin_unlock(&inode->i_lock); in fifo_open()
1138 return -ENOMEM; in fifo_open()
1139 pipe->files = 1; in fifo_open()
1140 spin_lock(&inode->i_lock); in fifo_open()
1141 if (unlikely(inode->i_pipe)) { in fifo_open()
1142 inode->i_pipe->files++; in fifo_open()
1143 spin_unlock(&inode->i_lock); in fifo_open()
1145 pipe = inode->i_pipe; in fifo_open()
1147 inode->i_pipe = pipe; in fifo_open()
1148 spin_unlock(&inode->i_lock); in fifo_open()
1151 filp->private_data = pipe; in fifo_open()
1154 mutex_lock(&pipe->mutex); in fifo_open()
1159 switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) { in fifo_open()
1166 pipe->r_counter++; in fifo_open()
1167 if (pipe->readers++ == 0) in fifo_open()
1170 if (!is_pipe && !pipe->writers) { in fifo_open()
1171 if ((filp->f_flags & O_NONBLOCK)) { in fifo_open()
1174 filp->f_pipe = pipe->w_counter; in fifo_open()
1176 if (wait_for_partner(pipe, &pipe->w_counter)) in fifo_open()
1185 * POSIX.1 says that O_NONBLOCK means return -1 with in fifo_open()
1188 ret = -ENXIO; in fifo_open()
1189 if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) in fifo_open()
1192 pipe->w_counter++; in fifo_open()
1193 if (!pipe->writers++) in fifo_open()
1196 if (!is_pipe && !pipe->readers) { in fifo_open()
1197 if (wait_for_partner(pipe, &pipe->r_counter)) in fifo_open()
1210 pipe->readers++; in fifo_open()
1211 pipe->writers++; in fifo_open()
1212 pipe->r_counter++; in fifo_open()
1213 pipe->w_counter++; in fifo_open()
1214 if (pipe->readers == 1 || pipe->writers == 1) in fifo_open()
1219 ret = -EINVAL; in fifo_open()
1224 mutex_unlock(&pipe->mutex); in fifo_open()
1228 if (!--pipe->readers) in fifo_open()
1229 wake_up_interruptible(&pipe->wr_wait); in fifo_open()
1230 ret = -ERESTARTSYS; in fifo_open()
1234 if (!--pipe->writers) in fifo_open()
1235 wake_up_interruptible_all(&pipe->rd_wait); in fifo_open()
1236 ret = -ERESTARTSYS; in fifo_open()
1240 mutex_unlock(&pipe->mutex); in fifo_open()
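fifo_open() above implements the POSIX open() semantics for FIFOs: a blocking open sleeps in wait_for_partner() until the other end shows up, while O_WRONLY | O_NONBLOCK with no reader fails with ENXIO, as noted in the comments. An illustrative sketch ("/tmp/demo.fifo" is an assumed path):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	mkfifo("/tmp/demo.fifo", 0600);
	int fd = open("/tmp/demo.fifo", O_WRONLY | O_NONBLOCK);
	if (fd < 0 && errno == ENXIO)
		printf("no reader on the FIFO yet\n");
	else if (fd >= 0)
		close(fd);
	unlink("/tmp/demo.fifo");
	return 0;
}

Without O_NONBLOCK the same open() would block until a reader arrives, which is the wait_for_partner() / wake_up_partner() pairing listed above.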
1269 * Currently we rely on the pipe array holding a power-of-2 number
1296 /* nr_slots larger than limits of pipe->{head,tail} */ in pipe_resize_ring()
1297 if (unlikely(nr_slots > (pipe_index_t)-1u)) in pipe_resize_ring()
1298 return -EINVAL; in pipe_resize_ring()
1303 return -ENOMEM; in pipe_resize_ring()
1305 spin_lock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1306 mask = pipe->ring_size - 1; in pipe_resize_ring()
1307 head = pipe->head; in pipe_resize_ring()
1308 tail = pipe->tail; in pipe_resize_ring()
1312 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1314 return -EBUSY; in pipe_resize_ring()
1325 memcpy(bufs, pipe->bufs + t, in pipe_resize_ring()
1328 unsigned int tsize = pipe->ring_size - t; in pipe_resize_ring()
1330 memcpy(bufs + tsize, pipe->bufs, in pipe_resize_ring()
1332 memcpy(bufs, pipe->bufs + t, in pipe_resize_ring()
1340 kfree(pipe->bufs); in pipe_resize_ring()
1341 pipe->bufs = bufs; in pipe_resize_ring()
1342 pipe->ring_size = nr_slots; in pipe_resize_ring()
1343 if (pipe->max_usage > nr_slots) in pipe_resize_ring()
1344 pipe->max_usage = nr_slots; in pipe_resize_ring()
1345 pipe->tail = tail; in pipe_resize_ring()
1346 pipe->head = head; in pipe_resize_ring()
1349 pipe->max_usage = nr_slots; in pipe_resize_ring()
1350 pipe->nr_accounted = nr_slots; in pipe_resize_ring()
1353 spin_unlock_irq(&pipe->rd_wait.lock); in pipe_resize_ring()
1356 wake_up_interruptible(&pipe->wr_wait); in pipe_resize_ring()
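The power-of-two constraint mentioned above exists because head and tail are free-running indices that wrap naturally and are only masked when a slot is dereferenced (see the 2019 comment near the top of the file). A minimal sketch of that convention, not kernel code; the names and the 16-slot size are illustrative:

#include <stdbool.h>

#define RING_SIZE 16			/* must be a power of two */

struct demo_ring {
	unsigned int head, tail;	/* free-running, never masked */
	int slots[RING_SIZE];
};

static bool demo_empty(const struct demo_ring *r) { return r->head == r->tail; }
static bool demo_full(const struct demo_ring *r)  { return r->head - r->tail >= RING_SIZE; }

static int *demo_slot(struct demo_ring *r, unsigned int idx)
{
	return &r->slots[idx & (RING_SIZE - 1)];	/* mask only at dereference */
}

int main(void)
{
	/* Start near the wrap point to show that the indices may overflow. */
	struct demo_ring r = { .head = 0xfffffffeu, .tail = 0xfffffffeu };

	*demo_slot(&r, r.head++) = 1;
	*demo_slot(&r, r.head++) = 2;	/* head has wrapped past 0 here */
	return demo_empty(&r) || demo_full(&r);	/* both false: occupancy is 2 */
}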
1362 * pipe size if successful, or return -ERROR on error.
1371 return -EBUSY; in pipe_set_size()
1377 return -EINVAL; in pipe_set_size()
1386 if (nr_slots > pipe->max_usage && in pipe_set_size()
1387 size > pipe_max_size && !capable(CAP_SYS_RESOURCE)) in pipe_set_size()
1388 return -EPERM; in pipe_set_size()
1390 user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots); in pipe_set_size()
1392 if (nr_slots > pipe->max_usage && in pipe_set_size()
1396 ret = -EPERM; in pipe_set_size()
1404 return pipe->max_usage * PAGE_SIZE; in pipe_set_size()
1407 (void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted); in pipe_set_size()
1412 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
1417 struct pipe_inode_info *pipe = file->private_data; in get_pipe_info()
1421 if (file->f_op != &pipefifo_fops && file->f_op != &pipeanon_fops) in get_pipe_info()
1435 return -EBADF; in pipe_fcntl()
1437 mutex_lock(&pipe->mutex); in pipe_fcntl()
1444 ret = pipe->max_usage * PAGE_SIZE; in pipe_fcntl()
1447 ret = -EINVAL; in pipe_fcntl()
1451 mutex_unlock(&pipe->mutex); in pipe_fcntl()
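pipe_set_size() and pipe_fcntl() above enforce the pipe-max-size sysctl (plus the per-user soft and hard page limits) for unprivileged callers, so an oversized F_SETPIPE_SZ request fails with EPERM without CAP_SYS_RESOURCE. An illustrative sketch; the 64 MiB request is an arbitrary value assumed to exceed the configured limit:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds))
		return 1;
	if (fcntl(fds[1], F_SETPIPE_SZ, 64 * 1024 * 1024) < 0 && errno == EPERM)
		fprintf(stderr, "request exceeds pipe-max-size for this user\n");
	printf("capacity now: %d bytes\n", fcntl(fds[1], F_GETPIPE_SZ));
	close(fds[0]);
	close(fds[1]);
	return 0;
}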
1461 * pipefs should _never_ be mounted by userland - too much of security hassle,
1463 * any operations on the root directory. However, we need a non-trivial
1464 * d_name - pipe: will go nicely and kill the special-casing in procfs.
1471 return -ENOMEM; in pipefs_init_fs_context()
1472 ctx->ops = &pipefs_ops; in pipefs_init_fs_context()
1473 ctx->dops = &pipefs_dentry_operations; in pipefs_init_fs_context()
1493 return -EINVAL; in do_proc_dopipe_max_size_conv()
1513 .procname = "pipe-max-size",
1520 .procname = "pipe-user-pages-hard",
1527 .procname = "pipe-user-pages-soft",