// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/pipe.c
 *
 * Copyright (C) 1991, 1992, 1999 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
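/*
 * Illustrative sketch (hypothetical caller, for documentation only): a
 * path that must hold both ends of two different pipes -- e.g. the
 * tee()/splice() code copying buffers from one pipe into another -- is
 * expected to take both locks via pipe_double_lock() and release them
 * individually:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... move or duplicate pipe_buffers ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 *
 * Taking the locks in ascending address order keeps two such callers
 * from deadlocking against each other.
 */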
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		if (memcg_kmem_enabled())
			memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
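/*
 * Hedged userspace sketch, for documentation only: tee(2) is the
 * typical consumer of the extra reference taken above. It duplicates
 * the buffers of one pipe into another without copying page contents
 * (pipe_in and pipe_out below are hypothetical descriptor pairs):
 *
 *	ssize_t n = tee(pipe_in[0], pipe_out[1], 65536, SPLICE_F_NONBLOCK);
 *
 * Afterwards both pipes reference the same pages; the data still has
 * to be drained from the source pipe by a later read() or splice().
 */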
/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
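/*
 * Hedged userspace sketch of the packet semantics above, for
 * documentation only: a pipe created with O_DIRECT stores every
 * write() as its own packet, and a read() returns at most one packet
 * even when the supplied buffer is larger:
 *
 *	int fds[2];
 *	char buf[16];
 *
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));
 *
 * The read() above returns 2 ("ab"), not 4; a second read() returns
 * the next packet.
 */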
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		buf = pipe->curbuf;
		nrbufs = pipe->nrbufs;
		while (--nrbufs >= 0) {
			count += pipe->bufs[buf].len;
			buf = (buf+1) & (pipe->buffers - 1);
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
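/*
 * Hedged userspace sketch, for documentation only: FIONREAD reports how
 * many bytes are currently queued in the pipe (the sum of buf->len
 * computed above); fds is a hypothetical descriptor pair:
 *
 *	int avail;
 *
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes buffered\n", avail);
 */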
static struct wait_queue_head *
pipe_get_poll_head(struct file *filp, __poll_t events)
{
	struct pipe_inode_info *pipe = filp->private_data;

	return &pipe->wait;
}

/* No kernel lock held - fine */
static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs = pipe->nrbufs;
	__poll_t mask = 0;

	/* Reading only -- no need for acquiring the semaphore. */
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	int err;
	struct inode *inode = get_pipe_inode();
	struct file *f;
	struct path path;

	if (!inode)
		return -ENFILE;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
	if (IS_ERR(f)) {
		err = PTR_ERR(f);
		goto err_dentry;
	}

	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
	f->private_data = inode->i_pipe;

	res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
	if (IS_ERR(res[0])) {
		err = PTR_ERR(res[0]);
		goto err_file;
	}

	path_get(&path);
	res[0]->private_data = inode->i_pipe;
	res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	res[1] = f;
	return 0;

err_file:
	put_filp(f);
err_dentry:
	free_pipe_info(inode->i_pipe);
	path_put(&path);
	return err;

err_inode:
	free_pipe_info(inode->i_pipe);
	iput(inode);
	return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
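/*
 * Hedged userspace sketch of the two entry points above, for
 * documentation only:
 *
 *	int fds[2];
 *
 *	pipe(fds);
 *	pipe2(fds, O_CLOEXEC | O_NONBLOCK);
 *
 * pipe(fds) is equivalent to pipe2(fds, 0); the flag mask is validated
 * in __do_pipe_flags(), and fds[0] is the read end, fds[1] the write end.
 */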
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 * O_RDONLY
	 * POSIX.1 says that O_NONBLOCK means return with the FIFO
	 * opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 * O_WRONLY
	 * POSIX.1 says that O_NONBLOCK means return -1 with
	 * errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 * O_RDWR
	 * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 * This implementation will NEVER block on a O_RDWR open, since
	 * the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.get_poll_head	= pipe_get_poll_head,
	.poll_mask	= pipe_poll_mask,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}
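/*
 * Hedged userspace sketch of the fcntl interface handled below, for
 * documentation only. The requested size is rounded up by
 * round_pipe_size(), so the capacity actually set (and later reported)
 * may be larger than what was asked for:
 *
 *	long set = fcntl(fd, F_SETPIPE_SZ, 1 << 20);
 *	long cur = fcntl(fd, F_GETPIPE_SZ);
 *
 * Here 1 << 20 requests a 1 MiB capacity. Unprivileged callers are
 * bounded by /proc/sys/fs/pipe-max-size and by the per-user soft/hard
 * page limits checked in pipe_set_size().
 */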
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);