/*-
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map.  This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
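 *
 * As a worked example of the thresholds above, suppose maxpipekva has
 * been autotuned to 512M (an illustrative figure): new pipes get 16K of
 * backing and may grow to 64K while pipekva stays below 256M (50%);
 * between 256M and 384M new pipes get only PAGE_SIZE and existing pipes
 * stop growing; above 384M (75%) existing pipes are also shrunk back to
 * 4K whenever possible.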
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes cannot persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static fo_rdwr_t	pipe_read;
static fo_rdwr_t	pipe_write;
static fo_truncate_t	pipe_truncate;
static fo_ioctl_t	pipe_ioctl;
static fo_poll_t	pipe_poll;
static fo_kqfilter_t	pipe_kqfilter;
static fo_stat_t	pipe_stat;
static fo_close_t	pipe_close;

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_truncate = pipe_truncate,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_flags = DFLAG_PASSABLE
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops = {
	.f_isfd = 1,
	.f_detach = filt_pipedetach,
	.f_event = filt_piperead
};
static struct filterops pipe_wfiltops = {
	.f_isfd = 1,
	.f_detach = filt_pipedetach,
	.f_event = filt_pipewrite
};

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
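
/*
 * MINPIPESIZE is the low-water mark used by pipe_read() for write-side
 * wakeup hysteresis: on its way out, the reader only wakes a blocked
 * writer once the buffer has drained below a third of its size.
 */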
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
	   &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
	   &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
	  &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
	  &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
	  &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
	  &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, int backing);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int	pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int	pipe_zone_init(void *mem, int size, int flags);
static void	pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
	    pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;

	KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

	pp = (struct pipepair *)mem;

	/*
	 * We zero both pipe endpoints to make sure all the kmem pointers
	 * are NULL, flag fields are zero'd, etc.  We timestamp both
	 * endpoints with the same time.
	 */
	rpipe = &pp->pp_rpipe;
	bzero(rpipe, sizeof(*rpipe));
	vfs_timestamp(&rpipe->pipe_ctime);
	rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

	wpipe = &pp->pp_wpipe;
	bzero(wpipe, sizeof(*wpipe));
	wpipe->pipe_ctime = rpipe->pipe_ctime;
	wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

	rpipe->pipe_peer = wpipe;
	rpipe->pipe_pair = pp;
	wpipe->pipe_peer = rpipe;
	wpipe->pipe_pair = pp;

	/*
	 * Mark both endpoints as present; they will later get free'd
	 * one at a time.  When both are free'd, then the whole pair
	 * is released.
	 */
	rpipe->pipe_present = PIPE_ACTIVE;
	wpipe->pipe_present = PIPE_ACTIVE;

	/*
	 * Eventually, the MAC Framework may initialize the label
	 * in ctor or init, but for now we do it elsewhere to avoid
	 * blocking in ctor or init.
	 */
	pp->pp_label = NULL;

	return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_destroy(&pp->pp_mtx);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2])
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
	/*
	 * The MAC label is shared between the connected endpoints.  As a
	 * result mac_pipe_init() and mac_pipe_create() are called once
	 * for the pair, and not on the endpoints.
	 */
	mac_pipe_init(pp);
	mac_pipe_create(td->td_ucred, pp);
#endif
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;

	knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
	knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

	/* Only the forward direction pipe is backed by default */
	if ((error = pipe_create(rpipe, 1)) != 0 ||
	    (error = pipe_create(wpipe, 0)) != 0) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `rf' has been held for us by falloc(). */
	fildes[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	finit(rf, FREAD | FWRITE, DTYPE_PIPE, rpipe, &pipeops);
	error = falloc(td, &wf, &fd);
	if (error) {
		fdclose(fdp, rf, fildes[0], td);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `wf' has been held for us by falloc(). */
	finit(wf, FREAD | FWRITE, DTYPE_PIPE, wpipe, &pipeops);
	fdrop(wf, td);
	fildes[1] = fd;
	fdrop(rf, td);

	return (0);
}

/* ARGSUSED */
int
pipe(struct thread *td, struct pipe_args *uap)
{
	int error;
	int fildes[2];

	error = kern_pipe(td, fildes);
	if (error)
		return (error);

	td->td_retval[0] = fildes[0];
	td->td_retval[1] = fildes[1];

	return (0);
}
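
/*
 * Userland sketch of the syscall above (illustrative, not part of the
 * kernel proper):
 *
 *	int fd[2];
 *
 *	if (pipe(fd) == -1)
 *		err(1, "pipe");
 *
 * fd[0] is conventionally the read end and fd[1] the write end, although
 * both descriptors are opened FREAD | FWRITE here, which is what makes
 * reverse-direction use of a pipe possible.
 */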

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace_new(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
	int error, cnt, firstseg;
	static int curfail = 0;
	static struct timeval lastfail;

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
	KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
		("pipespace: resize of direct writes not allowed"));
retry:
	cnt = cpipe->pipe_buffer.cnt;
	if (cnt > size)
		size = cnt;

	size = round_page(size);
	buffer = (caddr_t) vm_map_min(pipe_map);

	error = vm_map_find(pipe_map, NULL, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS) {
		if ((cpipe->pipe_buffer.buffer == NULL) &&
			(size > SMALL_PIPE_SIZE)) {
			size = SMALL_PIPE_SIZE;
			pipefragretry++;
			goto retry;
		}
		if (cpipe->pipe_buffer.buffer == NULL) {
			pipeallocfail++;
			if (ppsratecheck(&lastfail, &curfail, 1))
				printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
		} else {
			piperesizefail++;
		}
		return (ENOMEM);
	}

	/* copy data, then free old resources if we're resizing */
	if (cnt > 0) {
		if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
			firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, firstseg);
			if ((cnt - firstseg) > 0)
				bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
					cpipe->pipe_buffer.in);
		} else {
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, cnt);
		}
	}
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = cnt;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = cnt;
	atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}
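
/*
 * Worked example of the wraparound copy above (illustrative numbers):
 * with size = 16K, out = 12K, in = 4K and cnt = 8K, the live data wraps
 * around the end of the buffer.  firstseg = 16K - 12K = 4K bytes are
 * copied from buffer[out], then the remaining cnt - firstseg = 4K bytes
 * (which is exactly `in') are copied from the start of the old buffer.
 * The new buffer then holds the data linearly: out = 0, in = cnt = 8K.
 */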

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{

	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipespace"));
	return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipeunlock"));
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	if (cpipe->pipe_state & PIPE_SEL) {
		selwakeuppri(&cpipe->pipe_sel, PSOCK);
		if (!SEL_WAITING(&cpipe->pipe_sel))
			cpipe->pipe_state &= ~PIPE_SEL;
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static int
pipe_create(pipe, backing)
	struct pipe *pipe;
	int backing;
{
	int error;

	if (backing) {
		if (amountpipekva > maxpipekva / 2)
			error = pipespace_new(pipe, SMALL_PIPE_SIZE);
		else
			error = pipespace_new(pipe, PIPE_SIZE);
	} else {
		/* If we're not backing this pipe, no need to do anything. */
		error = 0;
	}
	return (error);
}
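
/*
 * A sketch of the locking pattern the I/O paths below follow (see the
 * "Locking rules" comment at the top of the file).  The mutex is held
 * only around flag manipulation; the long-term exclusion is the
 * PIPE_LOCKFL flag taken via pipelock():
 *
 *	PIPE_LOCK(cpipe);
 *	error = pipelock(cpipe, 1);	// may sleep; reread state after
 *	...
 *	PIPE_UNLOCK(cpipe);		// mutex cannot be held over uiomove
 *	error = uiomove(..., uio);
 *	PIPE_LOCK(cpipe);
 *	...
 *	pipeunlock(cpipe);
 *	PIPE_UNLOCK(cpipe);
 */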

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	if (amountpipekva > (3 * maxpipekva) / 4) {
		if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
			(rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
			(rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
			(piperesizeallowed == 1)) {
			PIPE_UNLOCK(rpipe);
			pipespace(rpipe, SMALL_PIPE_SIZE);
			PIPE_LOCK(rpipe);
		}
	}

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove_fromphys(rpipe->pipe_map.ms,
			    rpipe->pipe_map.pos, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	pmap_t pmap;
	u_int size;
	int i, j;
	vm_offset_t addr, endaddr;

	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
		("Clone attempt on non-direct write pipe!"));

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	pmap = vmspace_pmap(curproc->p_vmspace);
	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	if (endaddr < addr)
		return (EFAULT);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queue() and vm_page_unlock_queue()
		 * should not be performed outside of this loop.
		 */
	race:
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unhold(wpipe->pipe_map.ms[j]);
			vm_page_unlock_queues();
			return (EFAULT);
		}
		wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
		    VM_PROT_READ);
		if (wpipe->pipe_map.ms[i] == NULL)
			goto race;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++) {
		vm_page_unhold(wpipe->pipe_map.ms[i]);
	}
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	struct uio uio;
	struct iovec iov;
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	iov.iov_base = wpipe->pipe_buffer.buffer;
	iov.iov_len = size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;
	uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
	PIPE_LOCK(wpipe);
	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	error = pipelock(wpipe, 1);
	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	if (error) {
		pipeunlock(wpipe);
		goto error1;
	}
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_LOCK(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		pipeunlock(wpipe);
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipe_destroy_write_buffer(wpipe);
			pipeselwakeup(wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
		pipelock(wpipe, 0);
	}

	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif
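
/*
 * Sketch of a successful direct-write handoff between the two sides
 * (writer on the left, reader on the right), assuming the pipe buffer
 * is empty when the writer arrives:
 *
 *	pipe_direct_write()
 *	  set PIPE_DIRECTW, wire the
 *	  user pages into pipe_map
 *	  msleep("pipdwt")		pipe_read()
 *					  uiomove_fromphys() from pipe_map
 *					  pipe_map.cnt reaches 0:
 *					  clear PIPE_DIRECTW, wakeup()
 *	  PIPE_DIRECTW now clear:
 *	  pipe_destroy_write_buffer()
 */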

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int desiredsize, orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	error = pipelock(wpipe, 1);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
	if (error) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/* Choose a larger size if it's advantageous */
	desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
	while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
		if (piperesizeallowed != 1)
			break;
		if (amountpipekva > maxpipekva / 2)
			break;
		if (desiredsize == BIG_PIPE_SIZE)
			break;
		desiredsize = desiredsize * 2;
	}

	/* Choose a smaller size if we're in an OOM situation */
	if ((amountpipekva > (3 * maxpipekva) / 4) &&
		(wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
		(piperesizeallowed == 1))
		desiredsize = SMALL_PIPE_SIZE;

	/* Resize if the above determined that a new size was necessary */
	if ((desiredsize != wpipe->pipe_buffer.size) &&
		((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
		PIPE_UNLOCK(wpipe);
		pipespace(wpipe, desiredsize);
		PIPE_LOCK(wpipe);
	}
	if (wpipe->pipe_buffer.size == 0) {
		/*
		 * This can only happen for reverse direction use of pipes
		 * in a complete OOM situation.
		 */
		error = ENOMEM;
		--wpipe->pipe_busy;
		pipeunlock(wpipe);
		PIPE_UNLOCK(wpipe);
		return (error);
	}

	pipeunlock(wpipe);

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

		pipelock(wpipe, 0);
		if (wpipe->pipe_state & PIPE_EOF) {
			pipeunlock(wpipe);
			error = EPIPE;
			break;
		}
#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if (uio->uio_segflg == UIO_USERSPACE &&
		    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
		    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
		    (fp->f_flag & FNONBLOCK) == 0) {
			pipeunlock(wpipe);
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
		if (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeselwakeup(wpipe);
			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (error)
				break;
			else
				continue;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/*
		 * Writes of size <= PIPE_BUF must be atomic.
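		 * A write whose original size was <= PIPE_BUF therefore
		 * forces space to 0 below and sleeps (or fails with
		 * EAGAIN for a non-blocking descriptor) until the whole
		 * request fits, so that, for example, two processes
		 * appending 512-byte records to the same pipe never see
		 * their records interleaved.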
		 */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
				wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */

			PIPE_UNLOCK(rpipe);
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
					segsize, uio);
			PIPE_LOCK(rpipe);

			if (error == 0 && segsize < size) {
				KASSERT(wpipe->pipe_buffer.in + segsize ==
					wpipe->pipe_buffer.size,
					("Pipe buffer wraparound disappeared"));
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */

				PIPE_UNLOCK(rpipe);
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
				PIPE_LOCK(rpipe);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
					KASSERT(wpipe->pipe_buffer.in ==
						size - segsize +
						wpipe->pipe_buffer.size,
						("Expected wraparound bad"));
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
				KASSERT(wpipe->pipe_buffer.cnt <=
					wpipe->pipe_buffer.size,
					("Pipe buffer overflow"));
			}
			pipeunlock(wpipe);
			if (error != 0)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				pipeunlock(wpipe);
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
		}
	}

	pipelock(wpipe, 0);
	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	pipeunlock(wpipe);
	PIPE_UNLOCK(rpipe);
	return (error);
}

/* ARGSUSED */
static int
pipe_truncate(fp, length, active_cred, td)
	struct file *fp;
	off_t length;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EINVAL);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = fp->f_data;
	int error;

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	error = 0;
	switch (cmd) {

	case FIONBIO:
		break;

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		break;

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		break;

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		goto out_unlocked;

	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		goto out_unlocked;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		break;

	default:
		error = ENOTTY;
		break;
	}
	PIPE_UNLOCK(mpipe);
out_unlocked:
	return (error);
}

static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe->pipe_present != PIPE_ACTIVE ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((events & POLLINIGNEOF) == 0) {
		if (rpipe->pipe_state & PIPE_EOF) {
			revents |= (events & (POLLIN | POLLRDNORM));
			if (wpipe->pipe_present != PIPE_ACTIVE ||
			    (wpipe->pipe_state & PIPE_EOF))
				revents |= POLLHUP;
		}
	}

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			if (SEL_WAITING(&rpipe->pipe_sel))
				rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			if (SEL_WAITING(&wpipe->pipe_sel))
				wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = fp->f_data;
#ifdef MAC
	int error;

	PIPE_LOCK(pipe);
	error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
	PIPE_UNLOCK(pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = PAGE_SIZE;
	if (pipe->pipe_state & PIPE_DIRECTW)
		ub->st_size = pipe->pipe_map.cnt;
	else
		ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
		vm_map_remove(pipe_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	{
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipepair *pp;
	struct pipe *ppipe;

	KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

	PIPE_LOCK(cpipe);
	pipelock(cpipe, 0);
	pp = cpipe->pipe_pair;

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	cpipe->pipe_state |= PIPE_EOF;
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT;
		pipeunlock(cpipe);
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
		pipelock(cpipe, 0);
	}

	/*
	 * Disconnect from peer, if any.
	 */
	ppipe = cpipe->pipe_peer;
	if (ppipe->pipe_present == PIPE_ACTIVE) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
	}

	/*
	 * Mark this endpoint as free.  Release kmem resources.  We
	 * don't mark this endpoint as unused until we've finished
	 * doing that, or the pipe might disappear out from under
	 * us.
	 */
	PIPE_UNLOCK(cpipe);
	pipe_free_kmem(cpipe);
	PIPE_LOCK(cpipe);
	cpipe->pipe_present = PIPE_CLOSING;
	pipeunlock(cpipe);

	/*
	 * knlist_clear() may sleep dropping the PIPE_MTX.  Set
	 * PIPE_FINALIZED, which allows the other end to free the
	 * pipe_pair, only after the knotes are completely dismantled.
	 */
	knlist_clear(&cpipe->pipe_sel.si_note, 1);
	cpipe->pipe_present = PIPE_FINALIZED;
	knlist_destroy(&cpipe->pipe_sel.si_note);

	/*
	 * If both endpoints are now closed, release the memory for the
	 * pipe pair.  If not, unlock.
	 */
	if (ppipe->pipe_present == PIPE_FINALIZED) {
		PIPE_UNLOCK(cpipe);
#ifdef MAC
		mac_pipe_destroy(pp);
#endif
		uma_zfree(pipe_zone, cpipe->pipe_pair);
	} else
		PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = kn->kn_fp->f_data;
	PIPE_LOCK(cpipe);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
			/* other end of pipe has been closed */
			PIPE_UNLOCK(cpipe);
			return (EPIPE);
		}
		cpipe = cpipe->pipe_peer;
		break;
	default:
		PIPE_UNLOCK(cpipe);
		return (EINVAL);
	}

	knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	PIPE_LOCK(cpipe);
	if (kn->kn_filter == EVFILT_WRITE)
		cpipe = cpipe->pipe_peer;
	knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	int ret;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	ret = kn->kn_data > 0;
	PIPE_UNLOCK(rpipe);
	return ret;
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
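
/*
 * Userland sketch of the kqueue filters above (illustrative, not part
 * of the kernel proper):
 *
 *	struct kevent kev;
 *	int fd[2], kq;
 *
 *	pipe(fd);
 *	kq = kqueue();
 *	EV_SET(&kev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_piperead() then reports the number of readable bytes in kn_data
 * and sets EV_EOF once the write side has gone away.
 */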