/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD$
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
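
/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * both modes are invisible to applications.  A small write is staged
 * through the pageable kernel buffer, while a single write of
 * PIPE_MINDIRECT bytes or more becomes a candidate for the direct,
 * page-wired path.  The sizes below are arbitrary, and a reader on
 * fds[0] is assumed so that the large write can complete.
 *
 *	int fds[2];
 *	char small[512], big[64 * 1024];
 *
 *	if (pipe(fds) == -1)
 *		err(1, "pipe");
 *	(void)write(fds[1], small, sizeof(small));	(small: buffered)
 *	(void)write(fds[1], big, sizeof(big));		(large: may go direct)
 */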

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static int pipe_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int pipe_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int pipe_close __P((struct file *fp, struct proc *p));
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
		struct proc *p));
static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };


/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers, we cannot, of course limit
 * the amount of kva for pipes in general though.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeclose __P((struct pipe *cpipe));
static void pipe_free_kmem __P((struct pipe *cpipe));
static int pipe_create __P((struct pipe **cpipep));
static __inline int pipelock __P((struct pipe *cpipe, int catch));
static __inline void pipeunlock __P((struct pipe *cpipe));
static __inline void pipeselwakeup __P((struct pipe *cpipe));
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif
static int pipespace __P((struct pipe *cpipe, int size));

static vm_zone_t pipe_zone;

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
pipe(p, uap)
	struct proc *p;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(p, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	fhold(rf);
	p->p_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd);
	if (error) {
		if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
			fdp->fd_ofiles[p->p_retval[0]] = NULL;
			fdrop(rf, p);
		}
		fdrop(rf, p);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	p->p_retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	fdrop(rf, p);

	return (0);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	mtx_lock(&vm_mtx);
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		mtx_unlock(&vm_mtx);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	mtx_unlock(&vm_mtx);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = zalloc(pipe_zone);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = NULL;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.  We
			 * will either break out with an error or we will sleep and
			 * relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	mtx_lock(&vm_mtx);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			int j;

			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			mtx_unlock(&vm_mtx);
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	mtx_unlock(&vm_mtx);
	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	mtx_lock(&vm_mtx);
	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	mtx_unlock(&vm_mtx);
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    (caddr_t) wpipe->pipe_buffer.buffer, size);

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif

static int
pipe_write(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
		(nbigpipe < LIMITBIGPIPES) &&
		(wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
		(wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe,1)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return(error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
			(wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA)) &&
			(uio->uio_iov->iov_len >= PIPE_MINDIRECT)) {
			error = pipe_direct_write( wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe,1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}
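
/*
 * Illustrative note (an assumption, not part of this file): the
 * "space = 0" test in pipe_write() above is what gives writes of up to
 * PIPE_BUF bytes the atomicity required by POSIX, so several writers can
 * emit fixed-size records without interleaving, e.g.:
 *
 *	struct rec { int type; char payload[60]; } r;	(sizeof(r) <= PIPE_BUF)
 *
 *	if (write(fds[1], &r, sizeof(r)) != sizeof(r))
 *		err(1, "write");
 *
 * Records larger than PIPE_BUF may be split and interleaved with data
 * from other writers.
 */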

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);

	}
	return (ENOTTY);
}
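
/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * FIONREAD reports the bytes currently readable, whether they sit in the
 * kernel buffer or in a pending direct write, and FIOASYNC enables SIGIO
 * delivery to the owner set with FIOSETOWN.
 *
 *	int nready, on = 1, pid = getpid();
 *
 *	if (ioctl(fds[0], FIONREAD, &nready) == 0)
 *		printf("%d bytes buffered\n", nready);
 *	(void)ioctl(fds[0], FIOSETOWN, &pid);
 *	(void)ioctl(fds[0], FIOASYNC, &on);
 */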

int
pipe_poll(fp, events, cred, p)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}
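
/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * as implemented above, poll() reports the read side ready when data is
 * buffered, a direct write is pending, or the writer has gone away, and
 * reports the write side ready while at least PIPE_BUF bytes of buffer
 * space remain and no direct write is in progress.
 *
 *	struct pollfd pfd;
 *	char buf[1024];
 *	ssize_t n;
 *
 *	pfd.fd = fds[0];
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN))
 *		n = read(fds[0], buf, sizeof(buf));
 */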

static int
pipe_stat(fp, ub, p)
	struct file *fp;
	struct stat *ub;
	struct proc *p;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	mtx_assert(&vm_mtx, MA_OWNED);
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != NULL) {
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (cpipe) {

		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}
		/*
		 * free resources
		 */
		mtx_lock(&vm_mtx);
		pipe_free_kmem(cpipe);
		/* XXX: erm, doesn't zalloc already have its own locks and
		 * not need the giant vm lock?
		 */
		zfree(pipe_zone, cpipe);
		mtx_unlock(&vm_mtx);
	}
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}
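
/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * the filters above back EVFILT_READ and EVFILT_WRITE for pipe
 * descriptors, so a pipe can be monitored with kqueue like any socket.
 *
 *	struct kevent kev, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0)
 *		printf("%ld bytes readable\n", (long)ev.data);
 */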