/*-
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map.  This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes can not persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */

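/*
 * To make that rule concrete, the I/O paths below all follow this
 * pattern (a minimal illustrative sketch, error handling omitted):
 *
 *	PIPE_LOCK(cpipe);
 *	pipelock(cpipe, 1);		(may sleep; reread state after)
 *	...
 *	PIPE_UNLOCK(cpipe);		(mutex cannot be held over uiomove)
 *	error = uiomove(..., uio);
 *	PIPE_LOCK(cpipe);
 *	...
 *	pipeunlock(cpipe);
 *	PIPE_UNLOCK(cpipe);
 */
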
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static fo_rdwr_t	pipe_read;
static fo_rdwr_t	pipe_write;
static fo_truncate_t	pipe_truncate;
static fo_ioctl_t	pipe_ioctl;
static fo_poll_t	pipe_poll;
static fo_kqfilter_t	pipe_kqfilter;
static fo_stat_t	pipe_stat;
static fo_close_t	pipe_close;

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_truncate = pipe_truncate,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_flags = DFLAG_PASSABLE
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

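/*
 * (With the default 16K PIPE_SIZE mentioned above these come to about
 * 5.3K and 10.7K; MINPIPESIZE drives the write-side wakeup hysteresis
 * in pipe_read() below.)
 */
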
static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
	   &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
	   &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
	  &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
	  &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
	  &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
	  &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, int backing);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int	pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int	pipe_zone_init(void *mem, int size, int flags);
static void	pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
	    pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;

	KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

	pp = (struct pipepair *)mem;

	/*
	 * We zero both pipe endpoints to make sure all the kmem pointers
	 * are NULL, flag fields are zero'd, etc.  We timestamp both
	 * endpoints with the same time.
	 */
	rpipe = &pp->pp_rpipe;
	bzero(rpipe, sizeof(*rpipe));
	vfs_timestamp(&rpipe->pipe_ctime);
	rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

	wpipe = &pp->pp_wpipe;
	bzero(wpipe, sizeof(*wpipe));
	wpipe->pipe_ctime = rpipe->pipe_ctime;
	wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

	rpipe->pipe_peer = wpipe;
	rpipe->pipe_pair = pp;
	wpipe->pipe_peer = rpipe;
	wpipe->pipe_pair = pp;

	/*
	 * Mark both endpoints as present; they will later get free'd
	 * one at a time.  When both are free'd, then the whole pair
	 * is released.
	 */
	rpipe->pipe_present = PIPE_ACTIVE;
	wpipe->pipe_present = PIPE_ACTIVE;

	/*
	 * Eventually, the MAC Framework may initialize the label
	 * in ctor or init, but for now we do it elsewhere to avoid
	 * blocking in ctor or init.
	 */
	pp->pp_label = NULL;

	return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_destroy(&pp->pp_mtx);
}

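/*
 * A note on the zone callbacks above: init/fini run only when a
 * pipepair first enters or finally leaves the zone's backing store,
 * so the pair mutex survives uma_zfree()/uma_zalloc() cycles, while
 * the ctor runs on every allocation and re-zeroes and re-timestamps
 * the endpoints.
 */
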
/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2])
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
	/*
	 * The MAC label is shared between the connected endpoints.  As a
	 * result mac_pipe_init() and mac_pipe_create() are called once
	 * for the pair, and not on the endpoints.
	 */
	mac_pipe_init(pp);
	mac_pipe_create(td->td_ucred, pp);
#endif
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;

	knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
	knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

	/* Only the forward direction pipe is backed by default */
	if ((error = pipe_create(rpipe, 1)) != 0 ||
	    (error = pipe_create(wpipe, 0)) != 0) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `rf' has been held for us by falloc(). */
	fildes[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	finit(rf, FREAD | FWRITE, DTYPE_PIPE, rpipe, &pipeops);
	error = falloc(td, &wf, &fd);
	if (error) {
		fdclose(fdp, rf, fildes[0], td);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `wf' has been held for us by falloc(). */
	finit(wf, FREAD | FWRITE, DTYPE_PIPE, wpipe, &pipeops);
	fdrop(wf, td);
	fildes[1] = fd;
	fdrop(rf, td);

	return (0);
}

/* ARGSUSED */
int
pipe(struct thread *td, struct pipe_args *uap)
{
	int error;
	int fildes[2];

	error = kern_pipe(td, fildes);
	if (error)
		return (error);

	td->td_retval[0] = fildes[0];
	td->td_retval[1] = fildes[1];

	return (0);
}

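/*
 * From userland this is, of course, the familiar interface (an
 * illustrative sketch, not part of this file):
 *
 *	int fd[2];
 *	char c;
 *
 *	if (pipe(fd) == -1)
 *		err(1, "pipe");
 *	write(fd[1], "x", 1);		(fd[1] is the write side)
 *	read(fd[0], &c, 1);		(fd[0] is the read side)
 */
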
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails it will retain the old buffer and return ENOMEM.
 */
static int
pipespace_new(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
	int error, cnt, firstseg;
	static int curfail = 0;
	static struct timeval lastfail;

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
	KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
		("pipespace: resize of direct writes not allowed"));
retry:
	cnt = cpipe->pipe_buffer.cnt;
	if (cnt > size)
		size = cnt;

	size = round_page(size);
	buffer = (caddr_t) vm_map_min(pipe_map);

	error = vm_map_find(pipe_map, NULL, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS) {
		if ((cpipe->pipe_buffer.buffer == NULL) &&
			(size > SMALL_PIPE_SIZE)) {
			size = SMALL_PIPE_SIZE;
			pipefragretry++;
			goto retry;
		}
		if (cpipe->pipe_buffer.buffer == NULL) {
			pipeallocfail++;
			if (ppsratecheck(&lastfail, &curfail, 1))
				printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
		} else {
			piperesizefail++;
		}
		return (ENOMEM);
	}

	/* copy data, then free old resources if we're resizing */
	if (cnt > 0) {
		if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
			firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, firstseg);
			if ((cnt - firstseg) > 0)
				bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
					cpipe->pipe_buffer.in);
		} else {
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, cnt);
		}
	}
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = cnt;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = cnt;
	atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}

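/*
 * A worked example of the copy above (illustrative numbers): with
 * size = 16384, out = 12288, in = 4096 and cnt = 8192 the buffer has
 * wrapped, so firstseg = 16384 - 12288 = 4096 bytes are copied from
 * the tail, then the remaining 4096 bytes from the start of the old
 * buffer.  The new buffer always begins unwrapped, with out = 0 and
 * in = cnt.
 */
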
/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{

	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipespace"));
	return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipeunlock"));
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	if (cpipe->pipe_state & PIPE_SEL) {
		selwakeuppri(&cpipe->pipe_sel, PSOCK);
		if (!SEL_WAITING(&cpipe->pipe_sel))
			cpipe->pipe_state &= ~PIPE_SEL;
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static int
pipe_create(pipe, backing)
	struct pipe *pipe;
	int backing;
{
	int error;

	if (backing) {
		if (amountpipekva > maxpipekva / 2)
			error = pipespace_new(pipe, SMALL_PIPE_SIZE);
		else
			error = pipespace_new(pipe, PIPE_SIZE);
	} else {
		/* If we're not backing this pipe, no need to do anything. */
		error = 0;
	}
	return (error);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif

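	/*
	 * Opportunistically shrink the buffer back to SMALL_PIPE_SIZE
	 * when pipe KVA is under pressure (more than 75% of maxpipekva
	 * in use), the buffer is larger than that, its contents would
	 * still fit, and resizing is allowed.
	 */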
	if (amountpipekva > (3 * maxpipekva) / 4) {
		if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
			(rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
			(rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
			(piperesizeallowed == 1)) {
			PIPE_UNLOCK(rpipe);
			pipespace(rpipe, SMALL_PIPE_SIZE);
			PIPE_LOCK(rpipe);
		}
	}

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove_fromphys(rpipe->pipe_map.ms,
			    rpipe->pipe_map.pos, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	pmap_t pmap;
	u_int size;
	int i, j;
	vm_offset_t addr, endaddr;

	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
		("Clone attempt on non-direct write pipe!"));

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	pmap = vmspace_pmap(curproc->p_vmspace);
	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	if (endaddr < addr)
		return (EFAULT);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queue() and vm_page_unlock_queue()
		 * should not be performed outside of this loop.
		 */
	race:
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unhold(wpipe->pipe_map.ms[j]);
			vm_page_unlock_queues();
			return (EFAULT);
		}
		wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
		    VM_PROT_READ);
		if (wpipe->pipe_map.ms[i] == NULL)
			goto race;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

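/*
 * A note on the "race:" retry above: a page can be reclaimed between
 * vm_fault_quick() faulting it in and pmap_extract_and_hold() looking
 * it up, in which case the lookup returns NULL and the page is simply
 * faulted in again.
 */
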
/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++) {
		vm_page_unhold(wpipe->pipe_map.ms[i]);
	}
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	struct uio uio;
	struct iovec iov;
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	iov.iov_base = wpipe->pipe_buffer.buffer;
	iov.iov_len = size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;
	uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
	PIPE_LOCK(wpipe);
	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	error = pipelock(wpipe, 1);
	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	if (error) {
		pipeunlock(wpipe);
		goto error1;
	}
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_LOCK(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		pipeunlock(wpipe);
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipe_destroy_write_buffer(wpipe);
			pipeselwakeup(wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
		pipelock(wpipe, 0);
	}

	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif

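/*
 * To summarize the direct-write life cycle: the reader clears
 * PIPE_DIRECTW (and wakes the writer) only once it has consumed all
 * pipe_map.cnt bytes; if the writer is interrupted by a signal first,
 * pipe_clone_write_buffer() above falls back to the pageable kernel
 * buffer so the loaned user pages can be released safely.
 */
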
static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int desiredsize, orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	error = pipelock(wpipe, 1);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
	if (error) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/* Choose a larger size if it's advantageous */
	desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
	while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
		if (piperesizeallowed != 1)
			break;
		if (amountpipekva > maxpipekva / 2)
			break;
		if (desiredsize == BIG_PIPE_SIZE)
			break;
		desiredsize = desiredsize * 2;
	}

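	/*
	 * (For example, assuming an otherwise idle system, a 70K write
	 * doubles a 16K buffer to 32K and then 64K (BIG_PIPE_SIZE),
	 * matching the 0% - 50% policy described at the top of the file.)
	 */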
	/* Choose a smaller size if we're in an OOM situation */
	if ((amountpipekva > (3 * maxpipekva) / 4) &&
		(wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
		(piperesizeallowed == 1))
		desiredsize = SMALL_PIPE_SIZE;

	/* Resize if the above determined that a new size was necessary */
	if ((desiredsize != wpipe->pipe_buffer.size) &&
		((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
		PIPE_UNLOCK(wpipe);
		pipespace(wpipe, desiredsize);
		PIPE_LOCK(wpipe);
	}
	if (wpipe->pipe_buffer.size == 0) {
		/*
		 * This can only happen for reverse direction use of pipes
		 * in a complete OOM situation.
		 */
		error = ENOMEM;
		--wpipe->pipe_busy;
		pipeunlock(wpipe);
		PIPE_UNLOCK(wpipe);
		return (error);
	}

	pipeunlock(wpipe);

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

		pipelock(wpipe, 0);
		if (wpipe->pipe_state & PIPE_EOF) {
			pipeunlock(wpipe);
			error = EPIPE;
			break;
		}
#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if (uio->uio_segflg == UIO_USERSPACE &&
		    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
		    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
		    (fp->f_flag & FNONBLOCK) == 0) {
			pipeunlock(wpipe);
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
		if (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeselwakeup(wpipe);
			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (error)
				break;
			else
				continue;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/*
		 * Writes of size <= PIPE_BUF must be atomic.
		 */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

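		/*
		 * (POSIX requires that writes of at most PIPE_BUF bytes
		 * not be interleaved with data from other writers;
		 * forcing space to 0 here makes us sleep below until the
		 * whole request fits in the buffer.)
		 */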
		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
				wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */

			PIPE_UNLOCK(rpipe);
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
					segsize, uio);
			PIPE_LOCK(rpipe);

			if (error == 0 && segsize < size) {
				KASSERT(wpipe->pipe_buffer.in + segsize ==
					wpipe->pipe_buffer.size,
					("Pipe buffer wraparound disappeared"));
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */

				PIPE_UNLOCK(rpipe);
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
				PIPE_LOCK(rpipe);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
					KASSERT(wpipe->pipe_buffer.in ==
						size - segsize +
						wpipe->pipe_buffer.size,
						("Expected wraparound bad"));
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
				KASSERT(wpipe->pipe_buffer.cnt <=
					wpipe->pipe_buffer.size,
					("Pipe buffer overflow"));
			}
			pipeunlock(wpipe);
			if (error != 0)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				pipeunlock(wpipe);
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
		}
	}

	pipelock(wpipe, 0);
	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	pipeunlock(wpipe);
	PIPE_UNLOCK(rpipe);
	return (error);
}

/* ARGSUSED */
static int
pipe_truncate(fp, length, active_cred, td)
	struct file *fp;
	off_t length;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EINVAL);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = fp->f_data;
	int error;

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	error = 0;
	switch (cmd) {

	case FIONBIO:
		break;

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		break;

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		break;

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		goto out_unlocked;

	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		goto out_unlocked;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		break;

	default:
		error = ENOTTY;
		break;
	}
	PIPE_UNLOCK(mpipe);
out_unlocked:
	return (error);
}

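/*
 * For example, a reader can check how much data is buffered without
 * consuming it (an illustrative userland sketch, not part of this file):
 *
 *	int n;
 *
 *	if (ioctl(fd[0], FIONREAD, &n) == 0)
 *		printf("%d bytes available\n", n);
 */
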
static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe->pipe_present != PIPE_ACTIVE ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((events & POLLINIGNEOF) == 0) {
		if (rpipe->pipe_state & PIPE_EOF) {
			revents |= (events & (POLLIN | POLLRDNORM));
			if (wpipe->pipe_present != PIPE_ACTIVE ||
			    (wpipe->pipe_state & PIPE_EOF))
				revents |= POLLHUP;
		}
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			if (SEL_WAITING(&rpipe->pipe_sel))
				rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			if (SEL_WAITING(&wpipe->pipe_sel))
				wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}

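/*
 * (POLLINIGNEOF above is a FreeBSD extension: a caller that sets it
 * keeps waiting for data after the other end has closed, instead of
 * having EOF reported as readable and POLLHUP.)
 */
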
/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = fp->f_data;
#ifdef MAC
	int error;

	PIPE_LOCK(pipe);
	error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
	PIPE_UNLOCK(pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = PAGE_SIZE;
	if (pipe->pipe_state & PIPE_DIRECTW)
		ub->st_size = pipe->pipe_map.cnt;
	else
		ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
		("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
		vm_map_remove(pipe_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	{
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

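/*
 * (pipe_free_kmem() must be entered without the pair mutex held
 * because vm_map_remove() may sleep; hence the KASSERT above and the
 * unlock/relock around the call in pipeclose() below.)
 */
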
/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipepair *pp;
	struct pipe *ppipe;

	KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

	PIPE_LOCK(cpipe);
	pipelock(cpipe, 0);
	pp = cpipe->pipe_pair;

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	cpipe->pipe_state |= PIPE_EOF;
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT;
		pipeunlock(cpipe);
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
		pipelock(cpipe, 0);
	}


	/*
	 * Disconnect from peer, if any.
	 */
	ppipe = cpipe->pipe_peer;
	if (ppipe->pipe_present == PIPE_ACTIVE) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
	}

	/*
	 * Mark this endpoint as free.  Release kmem resources.  We
	 * don't mark this endpoint as unused until we've finished
	 * doing that, or the pipe might disappear out from under
	 * us.
	 */
	PIPE_UNLOCK(cpipe);
	pipe_free_kmem(cpipe);
	PIPE_LOCK(cpipe);
	cpipe->pipe_present = PIPE_CLOSING;
	pipeunlock(cpipe);

	/*
	 * knlist_clear() may sleep, dropping the PIPE_MTX.  Set
	 * PIPE_FINALIZED, which allows the other end to free the
	 * pipe_pair, only after the knotes are completely dismantled.
	 */
	knlist_clear(&cpipe->pipe_sel.si_note, 1);
	cpipe->pipe_present = PIPE_FINALIZED;
	knlist_destroy(&cpipe->pipe_sel.si_note);

	/*
	 * If both endpoints are now closed, release the memory for the
	 * pipe pair.  If not, unlock.
	 */
	if (ppipe->pipe_present == PIPE_FINALIZED) {
		PIPE_UNLOCK(cpipe);
#ifdef MAC
		mac_pipe_destroy(pp);
#endif
		uma_zfree(pipe_zone, cpipe->pipe_pair);
	} else
		PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = kn->kn_fp->f_data;
	PIPE_LOCK(cpipe);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
			/* other end of pipe has been closed */
			PIPE_UNLOCK(cpipe);
			return (EPIPE);
		}
		cpipe = cpipe->pipe_peer;
		break;
	default:
		PIPE_UNLOCK(cpipe);
		return (EINVAL);
	}

	knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	PIPE_LOCK(cpipe);
	if (kn->kn_filter == EVFILT_WRITE)
		cpipe = cpipe->pipe_peer;
	knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	int ret;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	ret = kn->kn_data > 0;
	PIPE_UNLOCK(rpipe);
	return ret;
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}