/*-
 * Copyright (c) 1996 John S. Dyson
 * Copyright (c) 2012 Giovanni Trematerra
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the sending process pins the underlying pages in
 * memory, and the receiving process copies directly from these pinned pages
 * in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map.  This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes can not persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

#define PIPE_PEER(pipe) \
    (((pipe)->pipe_type & PIPE_TYPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
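
/*
 * Note on PIPE_PEER(): for named pipes (FIFOs) a single endpoint carries
 * both directions, so the macro resolves to the pipe itself; for anonymous
 * pipes it resolves to the other endpoint of the pair.
 */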

/*
 * interfaces to the outside world
 */
static fo_rdwr_t pipe_read;
static fo_rdwr_t pipe_write;
static fo_truncate_t pipe_truncate;
static fo_ioctl_t pipe_ioctl;
static fo_poll_t pipe_poll;
static fo_kqfilter_t pipe_kqfilter;
static fo_stat_t pipe_stat;
static fo_close_t pipe_close;
static fo_chmod_t pipe_chmod;
static fo_chown_t pipe_chown;
static fo_fill_kinfo_t pipe_fill_kinfo;

struct fileops pipeops = {
    .fo_read = pipe_read,
    .fo_write = pipe_write,
    .fo_truncate = pipe_truncate,
    .fo_ioctl = pipe_ioctl,
    .fo_poll = pipe_poll,
    .fo_kqfilter = pipe_kqfilter,
    .fo_stat = pipe_stat,
    .fo_close = pipe_close,
    .fo_chmod = pipe_chmod,
    .fo_chown = pipe_chown,
    .fo_sendfile = invfo_sendfile,
    .fo_fill_kinfo = pipe_fill_kinfo,
    .fo_flags = DFLAG_PASSABLE
};

static void filt_pipedetach(struct knote *kn);
static void filt_pipedetach_notsup(struct knote *kn);
static int filt_pipenotsup(struct knote *kn, long hint);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_nfiltops = {
    .f_isfd = 1,
    .f_detach = filt_pipedetach_notsup,
    .f_event = filt_pipenotsup
};
static struct filterops pipe_rfiltops = {
    .f_isfd = 1,
    .f_detach = filt_pipedetach,
    .f_event = filt_piperead
};
static struct filterops pipe_wfiltops = {
    .f_isfd = 1,
    .f_detach = filt_pipedetach,
    .f_event = filt_pipewrite
};

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;
static long pipe_mindirect = PIPE_MINDIRECT;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
    &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
    &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
    &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
    &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
    &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, bool backing);
static int pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static void pipe_timestamp(struct timespec *tsp);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int pipe_zone_init(void *mem, int size, int flags);
static void pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;
static struct unrhdr64 pipeino_unr;
static dev_t pipedev_ino;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

    pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
        pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
        UMA_ALIGN_PTR, 0);
    KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
    new_unrhdr64(&pipeino_unr, 1);
    pipedev_ino = devfs_alloc_cdp_inode();
    KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}

static int
sysctl_handle_pipe_mindirect(SYSCTL_HANDLER_ARGS)
{
    int error = 0;
    long tmp_pipe_mindirect = pipe_mindirect;

    error = sysctl_handle_long(oidp, &tmp_pipe_mindirect, arg2, req);
    if (error != 0 || req->newptr == NULL)
        return (error);

    /*
     * Don't allow pipe_mindirect to be set so low that we violate
     * atomicity requirements.
     */
    if (tmp_pipe_mindirect <= PIPE_BUF)
        return (EINVAL);
    pipe_mindirect = tmp_pipe_mindirect;
    return (0);
}
SYSCTL_OID(_kern_ipc, OID_AUTO, pipe_mindirect, CTLTYPE_LONG | CTLFLAG_RW,
    &pipe_mindirect, 0, sysctl_handle_pipe_mindirect, "L",
    "Minimum write size triggering VM optimization");
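
/*
 * Example (illustrative only): the threshold above can be tuned at run time,
 * e.g. "sysctl kern.ipc.pipe_mindirect=65536".  The handler rejects values
 * less than or equal to PIPE_BUF so that atomic writes are never forced
 * through the direct-write path.
 */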

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
    struct pipepair *pp;
    struct pipe *rpipe, *wpipe;

    KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

    pp = (struct pipepair *)mem;

    /*
     * We zero both pipe endpoints to make sure all the kmem pointers
     * are NULL, flag fields are zero'd, etc.  We timestamp both
     * endpoints with the same time.
     */
    rpipe = &pp->pp_rpipe;
    bzero(rpipe, sizeof(*rpipe));
    pipe_timestamp(&rpipe->pipe_ctime);
    rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

    wpipe = &pp->pp_wpipe;
    bzero(wpipe, sizeof(*wpipe));
    wpipe->pipe_ctime = rpipe->pipe_ctime;
    wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

    rpipe->pipe_peer = wpipe;
    rpipe->pipe_pair = pp;
    wpipe->pipe_peer = rpipe;
    wpipe->pipe_pair = pp;

    /*
     * Mark both endpoints as present; they will later get free'd
     * one at a time.  When both are free'd, then the whole pair
     * is released.
     */
    rpipe->pipe_present = PIPE_ACTIVE;
    wpipe->pipe_present = PIPE_ACTIVE;

    /*
     * Eventually, the MAC Framework may initialize the label
     * in ctor or init, but for now we do it elsewhere to avoid
     * blocking in ctor or init.
     */
    pp->pp_label = NULL;

    return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
    struct pipepair *pp;

    KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

    pp = (struct pipepair *)mem;

    mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_NEW);
    return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
    struct pipepair *pp;

    KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

    pp = (struct pipepair *)mem;

    mtx_destroy(&pp->pp_mtx);
}

static int
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
{
    struct pipepair *pp;
    struct pipe *rpipe, *wpipe;
    int error;

    *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
    /*
     * The MAC label is shared between the connected endpoints.  As a
     * result mac_pipe_init() and mac_pipe_create() are called once
     * for the pair, and not on the endpoints.
     */
    mac_pipe_init(pp);
    mac_pipe_create(td->td_ucred, pp);
#endif
    rpipe = &pp->pp_rpipe;
    wpipe = &pp->pp_wpipe;

    knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
    knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

    /*
     * Only the forward direction pipe is backed by a big buffer by
     * default.
     */
    error = pipe_create(rpipe, true);
    if (error != 0)
        goto fail;
    error = pipe_create(wpipe, false);
    if (error != 0) {
        /*
         * This cleanup leaves the pipe inode number for rpipe
         * still allocated, but never used.  We do not free
         * inode numbers for opened pipes, which is required
         * for correctness because numbers must be unique.
         * But also it avoids any memory use by the unr
         * allocator, so stashing away the transient inode
         * number is reasonable.
         */
        pipe_free_kmem(rpipe);
        goto fail;
    }

    rpipe->pipe_state |= PIPE_DIRECTOK;
    wpipe->pipe_state |= PIPE_DIRECTOK;
    return (0);

fail:
    knlist_destroy(&rpipe->pipe_sel.si_note);
    knlist_destroy(&wpipe->pipe_sel.si_note);
#ifdef MAC
    mac_pipe_destroy(pp);
#endif
    uma_zfree(pipe_zone, pp);
    return (error);
}

int
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
{
    struct pipepair *pp;
    int error;

    error = pipe_paircreate(td, &pp);
    if (error != 0)
        return (error);
    pp->pp_rpipe.pipe_type |= PIPE_TYPE_NAMED;
    *ppipe = &pp->pp_rpipe;
    return (0);
}

void
pipe_dtor(struct pipe *dpipe)
{
    struct pipe *peer;

    peer = (dpipe->pipe_type & PIPE_TYPE_NAMED) != 0 ?
        dpipe->pipe_peer : NULL;
    funsetown(&dpipe->pipe_sigio);
    pipeclose(dpipe);
    if (peer != NULL) {
        funsetown(&peer->pipe_sigio);
        pipeclose(peer);
    }
}

/*
 * Get a timestamp.
 *
 * This used to be vfs_timestamp but the higher precision is unnecessary and
 * can very negatively affect performance in virtualized environments (e.g., on
 * vms running on amd64 when using the rdtscp instruction).
 */
static void
pipe_timestamp(struct timespec *tsp)
{

    getnanotime(tsp);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1,
    struct filecaps *fcaps2)
{
    struct file *rf, *wf;
    struct pipe *rpipe, *wpipe;
    struct pipepair *pp;
    int fd, fflags, error;

    error = pipe_paircreate(td, &pp);
    if (error != 0)
        return (error);
    rpipe = &pp->pp_rpipe;
    wpipe = &pp->pp_wpipe;
    error = falloc_caps(td, &rf, &fd, flags, fcaps1);
    if (error) {
        pipeclose(rpipe);
        pipeclose(wpipe);
        return (error);
    }
    /* An extra reference on `rf' has been held for us by falloc_caps(). */
    fildes[0] = fd;

    fflags = FREAD | FWRITE;
    if ((flags & O_NONBLOCK) != 0)
        fflags |= FNONBLOCK;

    /*
     * Warning: once we've gotten past allocation of the fd for the
     * read-side, we can only drop the read side via fdrop() in order
     * to avoid races against processes which manage to dup() the read
     * side while we are blocked trying to allocate the write side.
     */
    finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
    error = falloc_caps(td, &wf, &fd, flags, fcaps2);
    if (error) {
        fdclose(td, rf, fildes[0]);
        fdrop(rf, td);
        /* rpipe has been closed by fdrop(). */
        pipeclose(wpipe);
        return (error);
    }
    /* An extra reference on `wf' has been held for us by falloc_caps(). */
    finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
    fdrop(wf, td);
    fildes[1] = fd;
    fdrop(rf, td);

    return (0);
}

#ifdef COMPAT_FREEBSD10
/* ARGSUSED */
int
freebsd10_pipe(struct thread *td, struct freebsd10_pipe_args *uap __unused)
{
    int error;
    int fildes[2];

    error = kern_pipe(td, fildes, 0, NULL, NULL);
    if (error)
        return (error);

    td->td_retval[0] = fildes[0];
    td->td_retval[1] = fildes[1];

    return (0);
}
#endif

int
sys_pipe2(struct thread *td, struct pipe2_args *uap)
{
    int error, fildes[2];

    if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
        return (EINVAL);
    error = kern_pipe(td, fildes, uap->flags, NULL, NULL);
    if (error)
        return (error);
    error = copyout(fildes, uap->fildes, 2 * sizeof(int));
    if (error) {
        (void)kern_close(td, fildes[0]);
        (void)kern_close(td, fildes[1]);
    }
    return (error);
}
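
/*
 * Illustrative userspace counterpart of the entry points above (not part of
 * the kernel build):
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1)
 *		err(1, "pipe2");
 *
 * sys_pipe2() rejects any flag other than O_CLOEXEC and O_NONBLOCK with
 * EINVAL.
 */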

/*
 * Allocate kva for pipe circular buffer, the space is pageable
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace_new(struct pipe *cpipe, int size)
{
    caddr_t buffer;
    int error, cnt, firstseg;
    static int curfail = 0;
    static struct timeval lastfail;

    KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
    KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
        ("pipespace: resize of direct writes not allowed"));
retry:
    cnt = cpipe->pipe_buffer.cnt;
    if (cnt > size)
        size = cnt;

    size = round_page(size);
    buffer = (caddr_t) vm_map_min(pipe_map);

    error = vm_map_find(pipe_map, NULL, 0, (vm_offset_t *)&buffer, size, 0,
        VMFS_ANY_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
    if (error != KERN_SUCCESS) {
        if (cpipe->pipe_buffer.buffer == NULL &&
            size > SMALL_PIPE_SIZE) {
            size = SMALL_PIPE_SIZE;
            pipefragretry++;
            goto retry;
        }
        if (cpipe->pipe_buffer.buffer == NULL) {
            pipeallocfail++;
            if (ppsratecheck(&lastfail, &curfail, 1))
                printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
        } else {
            piperesizefail++;
        }
        return (ENOMEM);
    }

    /* copy data, then free old resources if we're resizing */
    if (cnt > 0) {
        if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
            firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
            bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                buffer, firstseg);
            if ((cnt - firstseg) > 0)
                bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
                    cpipe->pipe_buffer.in);
        } else {
            bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                buffer, cnt);
        }
    }
    pipe_free_kmem(cpipe);
    cpipe->pipe_buffer.buffer = buffer;
    cpipe->pipe_buffer.size = size;
    cpipe->pipe_buffer.in = cnt;
    cpipe->pipe_buffer.out = 0;
    cpipe->pipe_buffer.cnt = cnt;
    atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
    return (0);
}

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(struct pipe *cpipe, int size)
{

    KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
        ("Unlocked pipe passed to pipespace"));
    return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
    int error, prio;

    PIPE_LOCK_ASSERT(cpipe, MA_OWNED);

    prio = PRIBIO;
    if (catch)
        prio |= PCATCH;
    while (cpipe->pipe_state & PIPE_LOCKFL) {
        KASSERT(cpipe->pipe_waiters >= 0,
            ("%s: bad waiter count %d", __func__,
            cpipe->pipe_waiters));
        cpipe->pipe_waiters++;
        error = msleep(&cpipe->pipe_waiters, PIPE_MTX(cpipe), prio,
            "pipelk", 0);
        cpipe->pipe_waiters--;
        if (error != 0)
            return (error);
    }
    cpipe->pipe_state |= PIPE_LOCKFL;
    return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{

    PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
    KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
        ("Unlocked pipe passed to pipeunlock"));
    KASSERT(cpipe->pipe_waiters >= 0,
        ("%s: bad waiter count %d", __func__,
        cpipe->pipe_waiters));
    cpipe->pipe_state &= ~PIPE_LOCKFL;
    if (cpipe->pipe_waiters > 0)
        wakeup_one(&cpipe->pipe_waiters);
}

void
pipeselwakeup(struct pipe *cpipe)
{

    PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
    if (cpipe->pipe_state & PIPE_SEL) {
        selwakeuppri(&cpipe->pipe_sel, PSOCK);
        if (!SEL_WAITING(&cpipe->pipe_sel))
            cpipe->pipe_state &= ~PIPE_SEL;
    }
    if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
        pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
    KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static int
pipe_create(struct pipe *pipe, bool large_backing)
{
    int error;

    error = pipespace_new(pipe, !large_backing || amountpipekva >
        maxpipekva / 2 ? SMALL_PIPE_SIZE : PIPE_SIZE);
    if (error == 0)
        pipe->pipe_ino = alloc_unr64(&pipeino_unr);
    return (error);
}

/* ARGSUSED */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct pipe *rpipe;
    int error;
    int nread = 0;
    int size;

    rpipe = fp->f_data;

    /*
     * Try to avoid locking the pipe if we have nothing to do.
     *
     * There are programs which share one pipe amongst multiple processes
     * and perform non-blocking reads in parallel, even if the pipe is
     * empty.  This in particular is the case with BSD make, which when
     * spawned with a high -j number can find itself with over half of the
     * calls failing to find anything.
     */
    if ((fp->f_flag & FNONBLOCK) != 0 && !mac_pipe_check_read_enabled()) {
        if (__predict_false(uio->uio_resid == 0))
            return (0);
        if ((atomic_load_short(&rpipe->pipe_state) & PIPE_EOF) == 0 &&
            atomic_load_int(&rpipe->pipe_buffer.cnt) == 0 &&
            atomic_load_int(&rpipe->pipe_pages.cnt) == 0)
            return (EAGAIN);
    }

    PIPE_LOCK(rpipe);
    ++rpipe->pipe_busy;
    error = pipelock(rpipe, 1);
    if (error)
        goto unlocked_error;

#ifdef MAC
    error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
    if (error)
        goto locked_error;
#endif
    if (amountpipekva > (3 * maxpipekva) / 4) {
        if ((rpipe->pipe_state & PIPE_DIRECTW) == 0 &&
            rpipe->pipe_buffer.size > SMALL_PIPE_SIZE &&
            rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE &&
            piperesizeallowed == 1) {
            PIPE_UNLOCK(rpipe);
            pipespace(rpipe, SMALL_PIPE_SIZE);
            PIPE_LOCK(rpipe);
        }
    }

    while (uio->uio_resid) {
        /*
         * normal pipe buffer receive
         */
        if (rpipe->pipe_buffer.cnt > 0) {
            size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
            if (size > rpipe->pipe_buffer.cnt)
                size = rpipe->pipe_buffer.cnt;
            if (size > uio->uio_resid)
                size = uio->uio_resid;

            PIPE_UNLOCK(rpipe);
            error = uiomove(
                &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
                size, uio);
            PIPE_LOCK(rpipe);
            if (error)
                break;

            rpipe->pipe_buffer.out += size;
            if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
                rpipe->pipe_buffer.out = 0;

            rpipe->pipe_buffer.cnt -= size;

            /*
             * If there is no more to read in the pipe, reset
             * its pointers to the beginning.  This improves
             * cache hit stats.
             */
            if (rpipe->pipe_buffer.cnt == 0) {
                rpipe->pipe_buffer.in = 0;
                rpipe->pipe_buffer.out = 0;
            }
            nread += size;
#ifndef PIPE_NODIRECT
        /*
         * Direct copy, bypassing a kernel buffer.
         */
        } else if ((size = rpipe->pipe_pages.cnt) != 0) {
            if (size > uio->uio_resid)
                size = (u_int) uio->uio_resid;
            PIPE_UNLOCK(rpipe);
            error = uiomove_fromphys(rpipe->pipe_pages.ms,
                rpipe->pipe_pages.pos, size, uio);
            PIPE_LOCK(rpipe);
            if (error)
                break;
            nread += size;
            rpipe->pipe_pages.pos += size;
            rpipe->pipe_pages.cnt -= size;
            if (rpipe->pipe_pages.cnt == 0) {
                rpipe->pipe_state &= ~PIPE_WANTW;
                wakeup(rpipe);
            }
#endif
        } else {
            /*
             * detect EOF condition
             * read returns 0 on EOF, no need to set error
             */
            if (rpipe->pipe_state & PIPE_EOF)
                break;

            /*
             * If the "write-side" has been blocked, wake it up now.
             */
            if (rpipe->pipe_state & PIPE_WANTW) {
                rpipe->pipe_state &= ~PIPE_WANTW;
                wakeup(rpipe);
            }

            /*
             * Break if some data was read.
             */
            if (nread > 0)
                break;

            /*
             * Unlock the pipe buffer for our remaining processing.
             * We will either break out with an error or we will
             * sleep and relock to loop.
             */
            pipeunlock(rpipe);

            /*
             * Handle non-blocking mode operation or
             * wait for more data.
             */
            if (fp->f_flag & FNONBLOCK) {
                error = EAGAIN;
            } else {
                rpipe->pipe_state |= PIPE_WANTR;
                if ((error = msleep(rpipe, PIPE_MTX(rpipe),
                    PRIBIO | PCATCH,
                    "piperd", 0)) == 0)
                    error = pipelock(rpipe, 1);
            }
            if (error)
                goto unlocked_error;
        }
    }
#ifdef MAC
locked_error:
#endif
    pipeunlock(rpipe);

    /* XXX: should probably do this before getting any locks. */
    if (error == 0)
        pipe_timestamp(&rpipe->pipe_atime);
unlocked_error:
    --rpipe->pipe_busy;

    /*
     * PIPE_WANT processing only makes sense if pipe_busy is 0.
     */
    if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
        rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
        wakeup(rpipe);
    } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
        /*
         * Handle write blocking hysteresis.
         */
        if (rpipe->pipe_state & PIPE_WANTW) {
            rpipe->pipe_state &= ~PIPE_WANTW;
            wakeup(rpipe);
        }
    }

    /*
     * Only wake up writers if there was actually something read.
     * Otherwise, when calling read(2) at EOF, a spurious wakeup occurs.
     */
    if (nread > 0 &&
        rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt >= PIPE_BUF)
        pipeselwakeup(rpipe);

    PIPE_UNLOCK(rpipe);
    if (nread > 0)
        td->td_ru.ru_msgrcv++;
    return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
    u_int size;
    int i;

    PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
    KASSERT((wpipe->pipe_state & PIPE_DIRECTW) == 0,
        ("%s: PIPE_DIRECTW set on %p", __func__, wpipe));
    KASSERT(wpipe->pipe_pages.cnt == 0,
        ("%s: pipe map for %p contains residual data", __func__, wpipe));

    if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
        size = wpipe->pipe_buffer.size;
    else
        size = uio->uio_iov->iov_len;

    wpipe->pipe_state |= PIPE_DIRECTW;
    PIPE_UNLOCK(wpipe);
    i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
        (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
        wpipe->pipe_pages.ms, PIPENPAGES);
    PIPE_LOCK(wpipe);
    if (i < 0) {
        wpipe->pipe_state &= ~PIPE_DIRECTW;
        return (EFAULT);
    }

    wpipe->pipe_pages.npages = i;
    wpipe->pipe_pages.pos =
        ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
    wpipe->pipe_pages.cnt = size;

    uio->uio_iov->iov_len -= size;
    uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
    if (uio->uio_iov->iov_len == 0)
        uio->uio_iov++;
    uio->uio_resid -= size;
    uio->uio_offset += size;
    return (0);
}

/*
 * Unwire the process buffer.
 */
static void
pipe_destroy_write_buffer(struct pipe *wpipe)
{

    PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
    KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
        ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));
    KASSERT(wpipe->pipe_pages.cnt == 0,
        ("%s: pipe map for %p contains residual data", __func__, wpipe));

    wpipe->pipe_state &= ~PIPE_DIRECTW;
    vm_page_unhold_pages(wpipe->pipe_pages.ms, wpipe->pipe_pages.npages);
    wpipe->pipe_pages.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
    struct uio uio;
    struct iovec iov;
    int size;
    int pos;

    PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
    KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
        ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));

    size = wpipe->pipe_pages.cnt;
    pos = wpipe->pipe_pages.pos;
    wpipe->pipe_pages.cnt = 0;

    wpipe->pipe_buffer.in = size;
    wpipe->pipe_buffer.out = 0;
    wpipe->pipe_buffer.cnt = size;

    PIPE_UNLOCK(wpipe);
    iov.iov_base = wpipe->pipe_buffer.buffer;
    iov.iov_len = size;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = size;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_READ;
    uio.uio_td = curthread;
    uiomove_fromphys(wpipe->pipe_pages.ms, pos, size, &uio);
    PIPE_LOCK(wpipe);
    pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
    int error;

retry:
    PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
    if ((wpipe->pipe_state & PIPE_EOF) != 0) {
        error = EPIPE;
        goto error1;
    }
    if (wpipe->pipe_state & PIPE_DIRECTW) {
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }
        pipeselwakeup(wpipe);
        wpipe->pipe_state |= PIPE_WANTW;
        pipeunlock(wpipe);
        error = msleep(wpipe, PIPE_MTX(wpipe),
            PRIBIO | PCATCH, "pipdww", 0);
        pipelock(wpipe, 0);
        if (error != 0)
            goto error1;
        goto retry;
    }
    if (wpipe->pipe_buffer.cnt > 0) {
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }
        pipeselwakeup(wpipe);
        wpipe->pipe_state |= PIPE_WANTW;
        pipeunlock(wpipe);
        error = msleep(wpipe, PIPE_MTX(wpipe),
            PRIBIO | PCATCH, "pipdwc", 0);
        pipelock(wpipe, 0);
        if (error != 0)
            goto error1;
        goto retry;
    }

    error = pipe_build_write_buffer(wpipe, uio);
    if (error) {
        goto error1;
    }

    while (wpipe->pipe_pages.cnt != 0 &&
        (wpipe->pipe_state & PIPE_EOF) == 0) {
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }
        pipeselwakeup(wpipe);
        wpipe->pipe_state |= PIPE_WANTW;
        pipeunlock(wpipe);
        error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
            "pipdwt", 0);
        pipelock(wpipe, 0);
        if (error != 0)
            break;
    }

    if ((wpipe->pipe_state & PIPE_EOF) != 0) {
        wpipe->pipe_pages.cnt = 0;
        pipe_destroy_write_buffer(wpipe);
        pipeselwakeup(wpipe);
        error = EPIPE;
    } else if (error == EINTR || error == ERESTART) {
        pipe_clone_write_buffer(wpipe);
    } else {
        pipe_destroy_write_buffer(wpipe);
    }
    KASSERT((wpipe->pipe_state & PIPE_DIRECTW) == 0,
        ("pipe %p leaked PIPE_DIRECTW", wpipe));
    return (error);

error1:
    wakeup(wpipe);
    return (error);
}
#endif

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct pipe *wpipe, *rpipe;
    ssize_t orig_resid;
    int desiredsize, error;

    rpipe = fp->f_data;
    wpipe = PIPE_PEER(rpipe);
    PIPE_LOCK(rpipe);
    error = pipelock(wpipe, 1);
    if (error) {
        PIPE_UNLOCK(rpipe);
        return (error);
    }
    /*
     * detect loss of pipe read side, issue SIGPIPE if lost.
     */
    if (wpipe->pipe_present != PIPE_ACTIVE ||
        (wpipe->pipe_state & PIPE_EOF)) {
        pipeunlock(wpipe);
        PIPE_UNLOCK(rpipe);
        return (EPIPE);
    }
#ifdef MAC
    error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
    if (error) {
        pipeunlock(wpipe);
        PIPE_UNLOCK(rpipe);
        return (error);
    }
#endif
    ++wpipe->pipe_busy;

    /* Choose a larger size if it's advantageous */
    desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
    while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
        if (piperesizeallowed != 1)
            break;
        if (amountpipekva > maxpipekva / 2)
            break;
        if (desiredsize == BIG_PIPE_SIZE)
            break;
        desiredsize = desiredsize * 2;
    }

    /* Choose a smaller size if we're in an OOM situation */
    if (amountpipekva > (3 * maxpipekva) / 4 &&
        wpipe->pipe_buffer.size > SMALL_PIPE_SIZE &&
        wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE &&
        piperesizeallowed == 1)
        desiredsize = SMALL_PIPE_SIZE;

    /* Resize if the above determined that a new size was necessary */
    if (desiredsize != wpipe->pipe_buffer.size &&
        (wpipe->pipe_state & PIPE_DIRECTW) == 0) {
        PIPE_UNLOCK(wpipe);
        pipespace(wpipe, desiredsize);
        PIPE_LOCK(wpipe);
    }
    MPASS(wpipe->pipe_buffer.size != 0);

    orig_resid = uio->uio_resid;

    while (uio->uio_resid) {
        int space;

        if (wpipe->pipe_state & PIPE_EOF) {
            error = EPIPE;
            break;
        }
#ifndef PIPE_NODIRECT
        /*
         * If the transfer is large, we can gain performance if
         * we do process-to-process copies directly.
         * If the write is non-blocking, we don't use the
         * direct write mechanism.
         *
         * The direct write mechanism will detect the reader going
         * away on us.
         */
        if (uio->uio_segflg == UIO_USERSPACE &&
            uio->uio_iov->iov_len >= pipe_mindirect &&
            wpipe->pipe_buffer.size >= pipe_mindirect &&
            (fp->f_flag & FNONBLOCK) == 0) {
            error = pipe_direct_write(wpipe, uio);
            if (error != 0)
                break;
            continue;
        }
#endif

        /*
         * Pipe buffered writes cannot be coincidental with
         * direct writes.  We wait until the currently executing
         * direct write is completed before we start filling the
         * pipe buffer.  We break out if a signal occurs or the
         * reader goes away.
         */
        if (wpipe->pipe_pages.cnt != 0) {
            if (wpipe->pipe_state & PIPE_WANTR) {
                wpipe->pipe_state &= ~PIPE_WANTR;
                wakeup(wpipe);
            }
            pipeselwakeup(wpipe);
            wpipe->pipe_state |= PIPE_WANTW;
            pipeunlock(wpipe);
            error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
                "pipbww", 0);
            pipelock(wpipe, 0);
            if (error != 0)
                break;
            continue;
        }

        space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

        /* Writes of size <= PIPE_BUF must be atomic. */
        if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
            space = 0;

        if (space > 0) {
            int size;       /* Transfer size */
            int segsize;    /* first segment to transfer */

            /*
             * Transfer size is minimum of uio transfer
             * and free space in pipe buffer.
             */
            if (space > uio->uio_resid)
                size = uio->uio_resid;
            else
                size = space;
            /*
             * First segment to transfer is minimum of
             * transfer size and contiguous space in
             * pipe buffer.  If first segment to transfer
             * is less than the transfer size, we've got
             * a wraparound in the buffer.
             */
            segsize = wpipe->pipe_buffer.size -
                wpipe->pipe_buffer.in;
            if (segsize > size)
                segsize = size;

            /* Transfer first segment */

            PIPE_UNLOCK(rpipe);
            error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
                segsize, uio);
            PIPE_LOCK(rpipe);

            if (error == 0 && segsize < size) {
                KASSERT(wpipe->pipe_buffer.in + segsize ==
                    wpipe->pipe_buffer.size,
                    ("Pipe buffer wraparound disappeared"));
                /*
                 * Transfer remaining part now, to
                 * support atomic writes.  Wraparound
                 * happened.
                 */

                PIPE_UNLOCK(rpipe);
                error = uiomove(
                    &wpipe->pipe_buffer.buffer[0],
                    size - segsize, uio);
                PIPE_LOCK(rpipe);
            }
            if (error == 0) {
                wpipe->pipe_buffer.in += size;
                if (wpipe->pipe_buffer.in >=
                    wpipe->pipe_buffer.size) {
                    KASSERT(wpipe->pipe_buffer.in ==
                        size - segsize +
                        wpipe->pipe_buffer.size,
                        ("Expected wraparound bad"));
                    wpipe->pipe_buffer.in = size - segsize;
                }

                wpipe->pipe_buffer.cnt += size;
                KASSERT(wpipe->pipe_buffer.cnt <=
                    wpipe->pipe_buffer.size,
                    ("Pipe buffer overflow"));
            }
            if (error != 0)
                break;
            continue;
        } else {
            /*
             * If the "read-side" has been blocked, wake it up now.
             */
            if (wpipe->pipe_state & PIPE_WANTR) {
                wpipe->pipe_state &= ~PIPE_WANTR;
                wakeup(wpipe);
            }

            /*
             * don't block on non-blocking I/O
             */
            if (fp->f_flag & FNONBLOCK) {
                error = EAGAIN;
                break;
            }

            /*
             * We have no more space and have something to offer,
             * wake up select/poll.
             */
            pipeselwakeup(wpipe);

            wpipe->pipe_state |= PIPE_WANTW;
            pipeunlock(wpipe);
            error = msleep(wpipe, PIPE_MTX(rpipe),
                PRIBIO | PCATCH, "pipewr", 0);
            pipelock(wpipe, 0);
            if (error != 0)
                break;
            continue;
        }
    }

    --wpipe->pipe_busy;

    if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
        wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
        wakeup(wpipe);
    } else if (wpipe->pipe_buffer.cnt > 0) {
        /*
         * If we have put any characters in the buffer, we wake up
         * the reader.
         */
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }
    }

    /*
     * Don't return EPIPE if any byte was written.
     * EINTR and other interrupts are handled by generic I/O layer.
     * Do not pretend that I/O succeeded for obvious user error
     * like EFAULT.
     */
    if (uio->uio_resid != orig_resid && error == EPIPE)
        error = 0;

    if (error == 0)
        pipe_timestamp(&wpipe->pipe_mtime);

    /*
     * We have something to offer,
     * wake up select/poll.
     */
    if (wpipe->pipe_buffer.cnt)
        pipeselwakeup(wpipe);

    pipeunlock(wpipe);
    PIPE_UNLOCK(rpipe);
    if (uio->uio_resid != orig_resid)
        td->td_ru.ru_msgsnd++;
    return (error);
}

/* ARGSUSED */
static int
pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
    struct pipe *cpipe;
    int error;

    cpipe = fp->f_data;
    if (cpipe->pipe_type & PIPE_TYPE_NAMED)
        error = vnops.fo_truncate(fp, length, active_cred, td);
    else
        error = invfo_truncate(fp, length, active_cred, td);
    return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
    struct pipe *mpipe = fp->f_data;
    int error;

    PIPE_LOCK(mpipe);

#ifdef MAC
    error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
    if (error) {
        PIPE_UNLOCK(mpipe);
        return (error);
    }
#endif

    error = 0;
    switch (cmd) {
    case FIONBIO:
        break;

    case FIOASYNC:
        if (*(int *)data) {
            mpipe->pipe_state |= PIPE_ASYNC;
        } else {
            mpipe->pipe_state &= ~PIPE_ASYNC;
        }
        break;

    case FIONREAD:
        if (!(fp->f_flag & FREAD)) {
            *(int *)data = 0;
            PIPE_UNLOCK(mpipe);
            return (0);
        }
        if (mpipe->pipe_pages.cnt != 0)
            *(int *)data = mpipe->pipe_pages.cnt;
        else
            *(int *)data = mpipe->pipe_buffer.cnt;
        break;

    case FIOSETOWN:
        PIPE_UNLOCK(mpipe);
        error = fsetown(*(int *)data, &mpipe->pipe_sigio);
        goto out_unlocked;

    case FIOGETOWN:
        *(int *)data = fgetown(&mpipe->pipe_sigio);
        break;

    /* This is deprecated, FIOSETOWN should be used instead. */
    case TIOCSPGRP:
        PIPE_UNLOCK(mpipe);
        error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
        goto out_unlocked;

    /* This is deprecated, FIOGETOWN should be used instead. */
    case TIOCGPGRP:
        *(int *)data = -fgetown(&mpipe->pipe_sigio);
        break;

    default:
        error = ENOTTY;
        break;
    }
    PIPE_UNLOCK(mpipe);
out_unlocked:
    return (error);
}

static int
pipe_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
    struct pipe *rpipe;
    struct pipe *wpipe;
    int levents, revents;
#ifdef MAC
    int error;
#endif

    revents = 0;
    rpipe = fp->f_data;
    wpipe = PIPE_PEER(rpipe);
    PIPE_LOCK(rpipe);
#ifdef MAC
    error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
    if (error)
        goto locked_error;
#endif
    if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
        if (rpipe->pipe_pages.cnt > 0 || rpipe->pipe_buffer.cnt > 0)
            revents |= events & (POLLIN | POLLRDNORM);

    if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
        if (wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF) ||
            ((wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
            ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
            wpipe->pipe_buffer.size == 0)))
            revents |= events & (POLLOUT | POLLWRNORM);

    levents = events &
        (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
    if (rpipe->pipe_type & PIPE_TYPE_NAMED && fp->f_flag & FREAD && levents &&
        fp->f_pipegen == rpipe->pipe_wgen)
        events |= POLLINIGNEOF;

    if ((events & POLLINIGNEOF) == 0) {
        if (rpipe->pipe_state & PIPE_EOF) {
            if (fp->f_flag & FREAD)
                revents |= (events & (POLLIN | POLLRDNORM));
            if (wpipe->pipe_present != PIPE_ACTIVE ||
                (wpipe->pipe_state & PIPE_EOF))
                revents |= POLLHUP;
        }
    }

    if (revents == 0) {
        /*
         * Add ourselves regardless of eventmask as we have to return
         * POLLHUP even if it was not asked for.
         */
        if ((fp->f_flag & FREAD) != 0) {
            selrecord(td, &rpipe->pipe_sel);
            if (SEL_WAITING(&rpipe->pipe_sel))
                rpipe->pipe_state |= PIPE_SEL;
        }

        if ((fp->f_flag & FWRITE) != 0 &&
            wpipe->pipe_present == PIPE_ACTIVE) {
            selrecord(td, &wpipe->pipe_sel);
            if (SEL_WAITING(&wpipe->pipe_sel))
                wpipe->pipe_state |= PIPE_SEL;
        }
    }
#ifdef MAC
locked_error:
#endif
    PIPE_UNLOCK(rpipe);

    return (revents);
}

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *active_cred)
{
    struct pipe *pipe;
#ifdef MAC
    int error;
#endif

    pipe = fp->f_data;
#ifdef MAC
    if (mac_pipe_check_stat_enabled()) {
        PIPE_LOCK(pipe);
        error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
        PIPE_UNLOCK(pipe);
        if (error) {
            return (error);
        }
    }
#endif

    /* For named pipes ask the underlying filesystem. */
    if (pipe->pipe_type & PIPE_TYPE_NAMED) {
        return (vnops.fo_stat(fp, ub, active_cred));
    }

    bzero(ub, sizeof(*ub));
    ub->st_mode = S_IFIFO;
    ub->st_blksize = PAGE_SIZE;
    if (pipe->pipe_pages.cnt != 0)
        ub->st_size = pipe->pipe_pages.cnt;
    else
        ub->st_size = pipe->pipe_buffer.cnt;
    ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
    ub->st_atim = pipe->pipe_atime;
    ub->st_mtim = pipe->pipe_mtime;
    ub->st_ctim = pipe->pipe_ctime;
    ub->st_uid = fp->f_cred->cr_uid;
    ub->st_gid = fp->f_cred->cr_gid;
    ub->st_dev = pipedev_ino;
    ub->st_ino = pipe->pipe_ino;
    /*
     * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
     */
    return (0);
}

/* ARGSUSED */
static int
pipe_close(struct file *fp, struct thread *td)
{

    if (fp->f_vnode != NULL)
        return vnops.fo_close(fp, td);
    fp->f_ops = &badfileops;
    pipe_dtor(fp->f_data);
    fp->f_data = NULL;
    return (0);
}

static int
pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
{
    struct pipe *cpipe;
    int error;

    cpipe = fp->f_data;
    if (cpipe->pipe_type & PIPE_TYPE_NAMED)
        error = vn_chmod(fp, mode, active_cred, td);
    else
        error = invfo_chmod(fp, mode, active_cred, td);
    return (error);
}

static int
pipe_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
    struct pipe *cpipe;
    int error;

    cpipe = fp->f_data;
    if (cpipe->pipe_type & PIPE_TYPE_NAMED)
        error = vn_chown(fp, uid, gid, active_cred, td);
    else
        error = invfo_chown(fp, uid, gid, active_cred, td);
    return (error);
}

static int
pipe_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
    struct pipe *pi;

    if (fp->f_type == DTYPE_FIFO)
        return (vn_fill_kinfo(fp, kif, fdp));
    kif->kf_type = KF_TYPE_PIPE;
    pi = fp->f_data;
    kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
    kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
    kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
    kif->kf_un.kf_pipe.kf_pipe_buffer_in = pi->pipe_buffer.in;
    kif->kf_un.kf_pipe.kf_pipe_buffer_out = pi->pipe_buffer.out;
    kif->kf_un.kf_pipe.kf_pipe_buffer_size = pi->pipe_buffer.size;
    return (0);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{

    KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
        ("pipe_free_kmem: pipe mutex locked"));

    if (cpipe->pipe_buffer.buffer != NULL) {
        atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
        vm_map_remove(pipe_map,
            (vm_offset_t)cpipe->pipe_buffer.buffer,
            (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
        cpipe->pipe_buffer.buffer = NULL;
    }
#ifndef PIPE_NODIRECT
    {
        cpipe->pipe_pages.cnt = 0;
        cpipe->pipe_pages.pos = 0;
        cpipe->pipe_pages.npages = 0;
    }
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(struct pipe *cpipe)
{
#ifdef MAC
    struct pipepair *pp;
#endif
    struct pipe *ppipe;

    KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

    PIPE_LOCK(cpipe);
    pipelock(cpipe, 0);
#ifdef MAC
    pp = cpipe->pipe_pair;
#endif

    /*
     * If the other side is blocked, wake it up saying that
     * we want to close it down.
     */
    cpipe->pipe_state |= PIPE_EOF;
    while (cpipe->pipe_busy) {
        wakeup(cpipe);
        cpipe->pipe_state |= PIPE_WANT;
        pipeunlock(cpipe);
        msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
        pipelock(cpipe, 0);
    }

    pipeselwakeup(cpipe);

    /*
     * Disconnect from peer, if any.
     */
    ppipe = cpipe->pipe_peer;
    if (ppipe->pipe_present == PIPE_ACTIVE) {
        ppipe->pipe_state |= PIPE_EOF;
        wakeup(ppipe);
        pipeselwakeup(ppipe);
    }

    /*
     * Mark this endpoint as free.  Release kmem resources.  We
     * don't mark this endpoint as unused until we've finished
     * doing that, or the pipe might disappear out from under
     * us.
     */
    PIPE_UNLOCK(cpipe);
    pipe_free_kmem(cpipe);
    PIPE_LOCK(cpipe);
    cpipe->pipe_present = PIPE_CLOSING;
    pipeunlock(cpipe);

    /*
     * knlist_clear() may sleep, dropping the PIPE_MTX.  Set
     * PIPE_FINALIZED, which allows the other end to free the
     * pipe_pair, only after the knotes are completely dismantled.
     */
    knlist_clear(&cpipe->pipe_sel.si_note, 1);
    cpipe->pipe_present = PIPE_FINALIZED;
    seldrain(&cpipe->pipe_sel);
    knlist_destroy(&cpipe->pipe_sel.si_note);

    /*
     * If both endpoints are now closed, release the memory for the
     * pipe pair.  If not, unlock.
     */
    if (ppipe->pipe_present == PIPE_FINALIZED) {
        PIPE_UNLOCK(cpipe);
#ifdef MAC
        mac_pipe_destroy(pp);
#endif
        uma_zfree(pipe_zone, cpipe->pipe_pair);
    } else
        PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
    struct pipe *cpipe;

    /*
     * If a filter is requested that is not supported by this file
     * descriptor, don't return an error, but also don't ever generate an
     * event.
     */
    if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
        kn->kn_fop = &pipe_nfiltops;
        return (0);
    }
    if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
        kn->kn_fop = &pipe_nfiltops;
        return (0);
    }
    cpipe = fp->f_data;
    PIPE_LOCK(cpipe);
    switch (kn->kn_filter) {
    case EVFILT_READ:
        kn->kn_fop = &pipe_rfiltops;
        break;
    case EVFILT_WRITE:
        kn->kn_fop = &pipe_wfiltops;
        if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
            /* other end of pipe has been closed */
            PIPE_UNLOCK(cpipe);
            return (EPIPE);
        }
        cpipe = PIPE_PEER(cpipe);
        break;
    default:
        if ((cpipe->pipe_type & PIPE_TYPE_NAMED) != 0) {
            PIPE_UNLOCK(cpipe);
            return (vnops.fo_kqfilter(fp, kn));
        }
        PIPE_UNLOCK(cpipe);
        return (EINVAL);
    }

    kn->kn_hook = cpipe;
    knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
    PIPE_UNLOCK(cpipe);
    return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
    struct pipe *cpipe = kn->kn_hook;

    PIPE_LOCK(cpipe);
    knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
    PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
    struct file *fp = kn->kn_fp;
    struct pipe *rpipe = kn->kn_hook;

    PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
    kn->kn_data = rpipe->pipe_buffer.cnt;
    if (kn->kn_data == 0)
        kn->kn_data = rpipe->pipe_pages.cnt;

    if ((rpipe->pipe_state & PIPE_EOF) != 0 &&
        ((rpipe->pipe_type & PIPE_TYPE_NAMED) == 0 ||
        fp->f_pipegen != rpipe->pipe_wgen)) {
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    kn->kn_flags &= ~EV_EOF;
    return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
    struct pipe *wpipe = kn->kn_hook;

    /*
     * If this end of the pipe is closed, the knote was removed from the
     * knlist and the list lock (i.e., the pipe lock) is therefore not held.
     */
    if (wpipe->pipe_present == PIPE_ACTIVE ||
        (wpipe->pipe_type & PIPE_TYPE_NAMED) != 0) {
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);

        if (wpipe->pipe_state & PIPE_DIRECTW) {
            kn->kn_data = 0;
        } else if (wpipe->pipe_buffer.size > 0) {
            kn->kn_data = wpipe->pipe_buffer.size -
                wpipe->pipe_buffer.cnt;
        } else {
            kn->kn_data = PIPE_BUF;
        }
    }

    if (wpipe->pipe_present != PIPE_ACTIVE ||
        (wpipe->pipe_state & PIPE_EOF)) {
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    kn->kn_flags &= ~EV_EOF;
    return (kn->kn_data >= PIPE_BUF);
}

static void
filt_pipedetach_notsup(struct knote *kn)
{

}

static int
filt_pipenotsup(struct knote *kn, long hint)
{

    return (0);
}