// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * emptied.
 */
#define PIPE_MIN_DEF_BUFFERS 2

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
static unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. The hard limit is unset by default;
 * the soft limit matches the default values.
 */
static unsigned long pipe_user_pages_hard;
static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
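
/*
 * Editorial worked example (not kernel code): how the unmasked head/tail
 * scheme described above behaves. Assume ring_size == 8, so mask == 7;
 * the helpers referenced mirror pipe_empty()/pipe_occupancy() from
 * <linux/pipe_fs_i.h>.
 *
 *	unsigned int head = 0xfffffffe, tail = 0xfffffffc;
 *
 *	head - tail == 2		// occupancy survives the wrap
 *	tail & 7   == 4			// slot actually dereferenced
 *	(head + 2) & 7 == 0		// indices wrap naturally past 2^32
 *
 * Because the subtraction happens modulo 2^32, "head - tail" only stays
 * meaningful when the ring size is a power of two and <= 2^31.
 */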

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
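
/*
 * Editorial sketch (not kernel code): the address ordering in
 * pipe_double_lock() above is the classic ABBA-deadlock avoidance
 * pattern. If each task picked its own order, two tasks could deadlock:
 *
 *	task A: lock(pipe1); lock(pipe2);
 *	task B: lock(pipe2); lock(pipe1);	// ABBA: both stall forever
 *
 * By always taking the lower address first, both tasks agree on one
 * global order, so one of them simply waits for the other to finish.
 */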

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}

/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns %true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}

static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf,
					    unsigned int tail)
{
	pipe_buf_release(pipe, buf);

	/*
	 * If the pipe has a watch_queue, we need additional protection
	 * by the spinlock because notifications get posted with only
	 * this spinlock, no mutex
	 */
	if (pipe_has_watch_queue(pipe)) {
		spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
		if (buf->flags & PIPE_BUF_FLAG_LOSS)
			pipe->note_loss = true;
#endif
		pipe->tail = ++tail;
		spin_unlock_irq(&pipe->rd_wait.lock);
		return tail;
	}

	/*
	 * Without a watch_queue, we can simply increment the tail
	 * without the spinlock - the mutex is enough.
	 */
	pipe->tail = ++tail;
	return tail;
}
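
/*
 * Editorial reference (not kernel code): the ring helpers used by
 * pipe_read()/pipe_write() below, as defined in <linux/pipe_fs_i.h>
 * (paraphrased here):
 *
 *	pipe_empty(head, tail)		-> head == tail
 *	pipe_occupancy(head, tail)	-> head - tail
 *	pipe_full(head, tail, limit)	-> pipe_occupancy(head, tail) >= limit
 *
 * e.g. with head == 5, tail == 3 and max_usage == 2 the pipe counts as
 * full even though only slots 3 and 4 hold data.
 */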

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		/* Read ->head with a barrier vs post_one_notification() */
		unsigned int head = smp_load_acquire(&pipe->head);
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len)
				tail = pipe_update_tail(pipe, buf, tail);
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
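
/*
 * Editorial userspace sketch (not part of this file) of the read-side
 * semantics pipe_read() implements; fds[] is assumed to come from a
 * prior pipe(fds) call:
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *
 *	char buf[64];
 *	ssize_t n;
 *
 *	n = read(fds[0], buf, 0);	// always 0, even with data queued
 *	fcntl(fds[0], F_SETFL, O_NONBLOCK);
 *	n = read(fds[0], buf, sizeof(buf));
 *	if (n < 0 && errno == EAGAIN)
 *		;			// empty, but writers still exist
 *	close(fds[1]);			// last writer goes away
 *	n = read(fds[0], buf, sizeof(buf));
 *	// once any buffered data is drained, this returns 0: EOF
 */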

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
		!READ_ONCE(pipe->readers);
}
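
/*
 * Editorial userspace sketch (not part of this file): is_packetized()
 * above keys packet mode off O_DIRECT, which pipe2() can set:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);			// packet-mode pipe
 *	write(fds[1], "abcdef", 6);		// queued as one packet
 *	char buf[4];
 *	read(fds[0], buf, sizeof(buf));		// returns 4...
 *	// ...and the remaining 2 bytes of the packet are discarded,
 *	// matching the PIPE_BUF_FLAG_PACKET handling in pipe_read().
 */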

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/*
	 * Reject writing to watch queue pipes before the point where we lock
	 * the pipe.
	 * Otherwise, lockdep would be unhappy if the caller already has another
	 * pipe locked.
	 * If we had to support locking a normal pipe and a notification pipe at
	 * the same time, we could set up lockdep annotations for that, but
	 * since we don't actually need that, it's simpler to just bail here.
	 */
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			pipe->head = head + 1;

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if ((filp->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
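
/*
 * Editorial worked example (not kernel code): the merge path at the top
 * of pipe_write() is what gives ordinary pipes byte-stream behaviour.
 * Two small writes land in the same page when the previous buffer still
 * has PIPE_BUF_FLAG_CAN_MERGE set:
 *
 *	write(fd, "aaa", 3);	// new buffer: offset 0, len 3
 *	write(fd, "bbb", 3);	// merged:     offset 0, len 6, same page
 *
 * An O_DIRECT pipe instead marks buffers PIPE_BUF_FLAG_PACKET, so the
 * second write always starts a fresh buffer (one packet per write).
 */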

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics; this enables them */
	WRITE_ONCE(pipe->poll_usage, true);

	/*
	 * Reading pipe state only -- no need to acquire the mutex.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
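
/*
 * Editorial userspace sketch (not part of this file): the FIONREAD
 * handler in pipe_ioctl() above is what makes this work:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int queued;
 *	write(fds[1], "hello", 5);
 *	ioctl(fds[0], FIONREAD, &queued);	// queued == 5
 *
 * The value is the sum of ->len over all occupied slots, computed under
 * the pipe mutex, so it is exact at the instant of the call.
 */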

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on is true */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}
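
/*
 * Editorial worked example of the accounting in alloc_pipe_info()
 * above, assuming 4 KiB pages and the defaults PIPE_DEF_BUFFERS == 16
 * and INR_OPEN_CUR == 1024: pipe_user_pages_soft starts at
 * 16 * 1024 == 16384 pages (64 MiB per user). A user already at 16380
 * pages who creates another pipe is first charged 16 pages, trips the
 * soft limit, and is rolled back to PIPE_MIN_DEF_BUFFERS == 2 pages for
 * the new pipe. Only the hard limit (0, i.e. unlimited, by default)
 * makes the allocation fail outright.
 */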

void free_pipe_info(struct pipe_inode_info *pipe)
{
	unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		watch_queue_clear(pipe->watch_queue);
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		put_watch_queue(pipe->watch_queue);
#endif
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __ro_after_init;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 2;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	simple_inode_init_ts(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	/* pipe groks IOCB_NOWAIT */
	files[0]->f_mode |= FMODE_NOWAIT;
	files[1]->f_mode |= FMODE_NOWAIT;
	return 0;

err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
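
/*
 * Editorial note: __do_pipe_flags() above uses the usual two-phase fd
 * pattern - reserve numbers with get_unused_fd_flags(), publish with
 * fd_install() only once nothing can fail. A sketch of the shape:
 *
 *	fd = get_unused_fd_flags(flags);	// reserved, not yet visible
 *	if (fd < 0)
 *		goto unwind;			// put_unused_fd() undoes it
 *	...
 *	fd_install(fd, file);			// visible; no undo possible
 *
 * This is why do_pipe2() below copies the fd numbers to userspace
 * before installing the files.
 */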

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}
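
/*
 * Editorial userspace sketch (not part of this file) of the syscalls
 * defined above:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;		// handle the error
 *	// fds[0] is the read end, fds[1] the write end
 *
 * pipe(fds) is equivalent to pipe2(fds, 0).
 */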

/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on an O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
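
/*
 * Editorial userspace sketch (not part of this file) of the FIFO-open
 * semantics implemented in fifo_open() above; the path is illustrative:
 *
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *
 *	mkfifo("/tmp/f", 0600);
 *	int fd = open("/tmp/f", O_WRONLY | O_NONBLOCK);
 *	// fd == -1, errno == ENXIO: no reader yet
 *	fd = open("/tmp/f", O_RDONLY | O_NONBLOCK);
 *	// succeeds immediately, per POSIX, even with no writer
 *	fd = open("/tmp/f", O_RDWR);
 *	// never blocks: this process is both reader and writer
 */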

const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned int size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

/*
 * Resize the pipe ring to a number of slots.
 *
 * Note the pipe can be reduced in capacity, but only if the current
 * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
 * returned instead.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	spin_lock_irq(&pipe->rd_wait.lock);
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;

	n = pipe_occupancy(head, tail);
	if (nr_slots < n) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		kfree(bufs);
		return -EBUSY;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	if (!pipe_has_watch_queue(pipe)) {
		pipe->max_usage = nr_slots;
		pipe->nr_accounted = nr_slots;
	}

	spin_unlock_irq(&pipe->rd_wait.lock);

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
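
/*
 * Editorial worked example for round_pipe_size() above, assuming
 * 4 KiB pages:
 *
 *	round_pipe_size(0)		-> 4096	(clamped up to PAGE_SIZE)
 *	round_pipe_size(4097)		-> 8192	(next power of two)
 *	round_pipe_size(65536)		-> 65536	(already a power of two)
 *	round_pipe_size(1U << 31)	-> 1U << 31
 *	round_pipe_size((1U << 31) + 1)	-> 0	(rejected as too large)
 */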

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

	if (pipe_has_watch_queue(pipe))
		return -EBUSY;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}

/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
	if (for_splice && pipe_has_watch_queue(pipe))
		return NULL;
	return pipe;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
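
/*
 * Editorial userspace sketch (not part of this file) of the fcntl
 * interface handled above; 65536 assumes 4 KiB pages and the default
 * 16-slot ring:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	long size = fcntl(fds[1], F_GETPIPE_SZ, 0);	// typically 65536
 *	size = fcntl(fds[1], F_SETPIPE_SZ, 1 << 20);	// grow to 1 MiB
 *
 * F_SETPIPE_SZ returns pipe->max_usage * PAGE_SIZE, which may exceed
 * the requested size because of round_pipe_size().
 */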

static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#ifdef CONFIG_SYSCTL
static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	if (write) {
		unsigned int val;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		*valp = val;
	} else {
		unsigned int val = *valp;
		*lvalp = (unsigned long) val;
	}

	return 0;
}

static int proc_dopipe_max_size(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, NULL);
}

static struct ctl_table fs_pipe_sysctls[] = {
	{
		.procname	= "pipe-max-size",
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",
		.data		= &pipe_user_pages_hard,
		.maxlen		= sizeof(pipe_user_pages_hard),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "pipe-user-pages-soft",
		.data		= &pipe_user_pages_soft,
		.maxlen		= sizeof(pipe_user_pages_soft),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};
#endif

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
#ifdef CONFIG_SYSCTL
	register_sysctl_init("fs", fs_pipe_sysctls);
#endif
	return err;
}

fs_initcall(init_pipe_fs);
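
/*
 * Editorial note (not part of this file): the sysctls registered above
 * surface as files under /proc/sys/fs, e.g.:
 *
 *	$ cat /proc/sys/fs/pipe-max-size
 *	1048576
 *	# echo 4194304 > /proc/sys/fs/pipe-max-size	// root only; rounded
 *
 * Writes go through proc_dopipe_max_size(), so a value that
 * round_pipe_size() rejects (anything above 2^31) fails with EINVAL,
 * while small values are silently rounded up to PAGE_SIZE.
 */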