1 /* 2 * Copyright (c) 1996 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Absolutely no warranty of function or purpose is made by the author 15 * John S. Dyson. 16 * 4. Modifications may be freely made to this file if the above conditions 17 * are met. 18 * 19 * $FreeBSD$ 20 */ 21 22 /* 23 * This file contains a high-performance replacement for the socket-based 24 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support 25 * all features of sockets, but does do everything that pipes normally 26 * do. 27 */ 28 29 /* 30 * This code has two modes of operation, a small write mode and a large 31 * write mode. The small write mode acts like conventional pipes with 32 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the 33 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT 34 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and 35 * the receiving process can copy it directly from the pages in the sending 36 * process. 37 * 38 * If the sending process receives a signal, it is possible that it will 39 * go away, and certainly its address space can change, because control 40 * is returned back to the user-mode side. In that case, the pipe code 41 * arranges to copy the buffer supplied by the user process, to a pageable 42 * kernel buffer, and the receiving process will grab the data from the 43 * pageable kernel buffer. Since signals don't happen all that often, 44 * the copy operation is normally eliminated. 45 * 46 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will 47 * happen for small transfers so that the system will not spend all of 48 * its time context switching. PIPE_SIZE is constrained by the 49 * amount of kernel virtual memory. 50 */ 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/fcntl.h> 55 #include <sys/file.h> 56 #include <sys/filedesc.h> 57 #include <sys/filio.h> 58 #include <sys/lock.h> 59 #include <sys/mutex.h> 60 #include <sys/ttycom.h> 61 #include <sys/stat.h> 62 #include <sys/poll.h> 63 #include <sys/selinfo.h> 64 #include <sys/signalvar.h> 65 #include <sys/sysproto.h> 66 #include <sys/pipe.h> 67 #include <sys/proc.h> 68 #include <sys/vnode.h> 69 #include <sys/uio.h> 70 #include <sys/event.h> 71 72 #include <vm/vm.h> 73 #include <vm/vm_param.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_kern.h> 76 #include <vm/vm_extern.h> 77 #include <vm/pmap.h> 78 #include <vm/vm_map.h> 79 #include <vm/vm_page.h> 80 #include <vm/vm_zone.h> 81 82 /* 83 * Use this define if you want to disable *fancy* VM things. Expect an 84 * approx 30% decrease in transfer rate. This could be useful for 85 * NetBSD or OpenBSD. 86 */ 87 /* #define PIPE_NODIRECT */ 88 89 /* 90 * interfaces to the outside world 91 */ 92 static int pipe_read __P((struct file *fp, struct uio *uio, 93 struct ucred *cred, int flags, struct thread *td)); 94 static int pipe_write __P((struct file *fp, struct uio *uio, 95 struct ucred *cred, int flags, struct thread *td)); 96 static int pipe_close __P((struct file *fp, struct thread *td)); 97 static int pipe_poll __P((struct file *fp, int events, struct ucred *cred, 98 struct thread *td)); 99 static int pipe_kqfilter __P((struct file *fp, struct knote *kn)); 100 static int pipe_stat __P((struct file *fp, struct stat *sb, struct thread *td)); 101 static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct thread *td)); 102 103 static struct fileops pipeops = { 104 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter, 105 pipe_stat, pipe_close 106 }; 107 108 static void filt_pipedetach(struct knote *kn); 109 static int filt_piperead(struct knote *kn, long hint); 110 static int filt_pipewrite(struct knote *kn, long hint); 111 112 static struct filterops pipe_rfiltops = 113 { 1, NULL, filt_pipedetach, filt_piperead }; 114 static struct filterops pipe_wfiltops = 115 { 1, NULL, filt_pipedetach, filt_pipewrite }; 116 117 118 /* 119 * Default pipe buffer size(s), this can be kind-of large now because pipe 120 * space is pageable. The pipe code will try to maintain locality of 121 * reference for performance reasons, so small amounts of outstanding I/O 122 * will not wipe the cache. 123 */ 124 #define MINPIPESIZE (PIPE_SIZE/3) 125 #define MAXPIPESIZE (2*PIPE_SIZE/3) 126 127 /* 128 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but 129 * is there so that on large systems, we don't exhaust it. 130 */ 131 #define MAXPIPEKVA (8*1024*1024) 132 133 /* 134 * Limit for direct transfers, we cannot, of course limit 135 * the amount of kva for pipes in general though. 136 */ 137 #define LIMITPIPEKVA (16*1024*1024) 138 139 /* 140 * Limit the number of "big" pipes 141 */ 142 #define LIMITBIGPIPES 32 143 static int nbigpipe; 144 145 static int amountpipekva; 146 147 static void pipeclose __P((struct pipe *cpipe)); 148 static void pipe_free_kmem __P((struct pipe *cpipe)); 149 static int pipe_create __P((struct pipe **cpipep)); 150 static __inline int pipelock __P((struct pipe *cpipe, int catch)); 151 static __inline void pipeunlock __P((struct pipe *cpipe)); 152 static __inline void pipeselwakeup __P((struct pipe *cpipe)); 153 #ifndef PIPE_NODIRECT 154 static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio)); 155 static void pipe_destroy_write_buffer __P((struct pipe *wpipe)); 156 static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio)); 157 static void pipe_clone_write_buffer __P((struct pipe *wpipe)); 158 #endif 159 static int pipespace __P((struct pipe *cpipe, int size)); 160 161 static vm_zone_t pipe_zone; 162 163 /* 164 * The pipe system call for the DTYPE_PIPE type of pipes 165 */ 166 167 /* ARGSUSED */ 168 int 169 pipe(td, uap) 170 struct thread *td; 171 struct pipe_args /* { 172 int dummy; 173 } */ *uap; 174 { 175 struct filedesc *fdp = td->td_proc->p_fd; 176 struct file *rf, *wf; 177 struct pipe *rpipe, *wpipe; 178 int fd, error; 179 180 if (pipe_zone == NULL) 181 pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4); 182 183 rpipe = wpipe = NULL; 184 if (pipe_create(&rpipe) || pipe_create(&wpipe)) { 185 pipeclose(rpipe); 186 pipeclose(wpipe); 187 return (ENFILE); 188 } 189 190 rpipe->pipe_state |= PIPE_DIRECTOK; 191 wpipe->pipe_state |= PIPE_DIRECTOK; 192 193 error = falloc(td, &rf, &fd); 194 if (error) { 195 pipeclose(rpipe); 196 pipeclose(wpipe); 197 return (error); 198 } 199 fhold(rf); 200 td->td_retval[0] = fd; 201 202 /* 203 * Warning: once we've gotten past allocation of the fd for the 204 * read-side, we can only drop the read side via fdrop() in order 205 * to avoid races against processes which manage to dup() the read 206 * side while we are blocked trying to allocate the write side. 207 */ 208 FILE_LOCK(rf); 209 rf->f_flag = FREAD | FWRITE; 210 rf->f_type = DTYPE_PIPE; 211 rf->f_data = (caddr_t)rpipe; 212 rf->f_ops = &pipeops; 213 FILE_UNLOCK(rf); 214 error = falloc(td, &wf, &fd); 215 if (error) { 216 FILEDESC_LOCK(fdp); 217 if (fdp->fd_ofiles[td->td_retval[0]] == rf) { 218 fdp->fd_ofiles[td->td_retval[0]] = NULL; 219 FILEDESC_UNLOCK(fdp); 220 fdrop(rf, td); 221 } else 222 FILEDESC_UNLOCK(fdp); 223 fdrop(rf, td); 224 /* rpipe has been closed by fdrop(). */ 225 pipeclose(wpipe); 226 return (error); 227 } 228 FILE_LOCK(wf); 229 wf->f_flag = FREAD | FWRITE; 230 wf->f_type = DTYPE_PIPE; 231 wf->f_data = (caddr_t)wpipe; 232 wf->f_ops = &pipeops; 233 FILE_UNLOCK(wf); 234 td->td_retval[1] = fd; 235 rpipe->pipe_peer = wpipe; 236 wpipe->pipe_peer = rpipe; 237 fdrop(rf, td); 238 239 return (0); 240 } 241 242 /* 243 * Allocate kva for pipe circular buffer, the space is pageable 244 * This routine will 'realloc' the size of a pipe safely, if it fails 245 * it will retain the old buffer. 246 * If it fails it will return ENOMEM. 247 */ 248 static int 249 pipespace(cpipe, size) 250 struct pipe *cpipe; 251 int size; 252 { 253 struct vm_object *object; 254 caddr_t buffer; 255 int npages, error; 256 257 GIANT_REQUIRED; 258 259 npages = round_page(size)/PAGE_SIZE; 260 /* 261 * Create an object, I don't like the idea of paging to/from 262 * kernel_object. 263 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems. 264 */ 265 object = vm_object_allocate(OBJT_DEFAULT, npages); 266 buffer = (caddr_t) vm_map_min(kernel_map); 267 268 /* 269 * Insert the object into the kernel map, and allocate kva for it. 270 * The map entry is, by default, pageable. 271 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems. 272 */ 273 error = vm_map_find(kernel_map, object, 0, 274 (vm_offset_t *) &buffer, size, 1, 275 VM_PROT_ALL, VM_PROT_ALL, 0); 276 277 if (error != KERN_SUCCESS) { 278 vm_object_deallocate(object); 279 return (ENOMEM); 280 } 281 282 /* free old resources if we're resizing */ 283 pipe_free_kmem(cpipe); 284 cpipe->pipe_buffer.object = object; 285 cpipe->pipe_buffer.buffer = buffer; 286 cpipe->pipe_buffer.size = size; 287 cpipe->pipe_buffer.in = 0; 288 cpipe->pipe_buffer.out = 0; 289 cpipe->pipe_buffer.cnt = 0; 290 amountpipekva += cpipe->pipe_buffer.size; 291 return (0); 292 } 293 294 /* 295 * initialize and allocate VM and memory for pipe 296 */ 297 static int 298 pipe_create(cpipep) 299 struct pipe **cpipep; 300 { 301 struct pipe *cpipe; 302 int error; 303 304 *cpipep = zalloc(pipe_zone); 305 if (*cpipep == NULL) 306 return (ENOMEM); 307 308 cpipe = *cpipep; 309 310 /* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */ 311 cpipe->pipe_buffer.object = NULL; 312 #ifndef PIPE_NODIRECT 313 cpipe->pipe_map.kva = NULL; 314 #endif 315 /* 316 * protect so pipeclose() doesn't follow a junk pointer 317 * if pipespace() fails. 318 */ 319 bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel)); 320 cpipe->pipe_state = 0; 321 cpipe->pipe_peer = NULL; 322 cpipe->pipe_busy = 0; 323 324 #ifndef PIPE_NODIRECT 325 /* 326 * pipe data structure initializations to support direct pipe I/O 327 */ 328 cpipe->pipe_map.cnt = 0; 329 cpipe->pipe_map.kva = 0; 330 cpipe->pipe_map.pos = 0; 331 cpipe->pipe_map.npages = 0; 332 /* cpipe->pipe_map.ms[] = invalid */ 333 #endif 334 335 error = pipespace(cpipe, PIPE_SIZE); 336 if (error) 337 return (error); 338 339 vfs_timestamp(&cpipe->pipe_ctime); 340 cpipe->pipe_atime = cpipe->pipe_ctime; 341 cpipe->pipe_mtime = cpipe->pipe_ctime; 342 343 return (0); 344 } 345 346 347 /* 348 * lock a pipe for I/O, blocking other access 349 */ 350 static __inline int 351 pipelock(cpipe, catch) 352 struct pipe *cpipe; 353 int catch; 354 { 355 int error; 356 357 while (cpipe->pipe_state & PIPE_LOCK) { 358 cpipe->pipe_state |= PIPE_LWANT; 359 error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO, 360 "pipelk", 0); 361 if (error != 0) 362 return (error); 363 } 364 cpipe->pipe_state |= PIPE_LOCK; 365 return (0); 366 } 367 368 /* 369 * unlock a pipe I/O lock 370 */ 371 static __inline void 372 pipeunlock(cpipe) 373 struct pipe *cpipe; 374 { 375 376 cpipe->pipe_state &= ~PIPE_LOCK; 377 if (cpipe->pipe_state & PIPE_LWANT) { 378 cpipe->pipe_state &= ~PIPE_LWANT; 379 wakeup(cpipe); 380 } 381 } 382 383 static __inline void 384 pipeselwakeup(cpipe) 385 struct pipe *cpipe; 386 { 387 388 if (cpipe->pipe_state & PIPE_SEL) { 389 cpipe->pipe_state &= ~PIPE_SEL; 390 selwakeup(&cpipe->pipe_sel); 391 } 392 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) 393 pgsigio(cpipe->pipe_sigio, SIGIO, 0); 394 KNOTE(&cpipe->pipe_sel.si_note, 0); 395 } 396 397 /* ARGSUSED */ 398 static int 399 pipe_read(fp, uio, cred, flags, td) 400 struct file *fp; 401 struct uio *uio; 402 struct ucred *cred; 403 struct thread *td; 404 int flags; 405 { 406 struct pipe *rpipe = (struct pipe *) fp->f_data; 407 int error; 408 int nread = 0; 409 u_int size; 410 411 ++rpipe->pipe_busy; 412 error = pipelock(rpipe, 1); 413 if (error) 414 goto unlocked_error; 415 416 while (uio->uio_resid) { 417 /* 418 * normal pipe buffer receive 419 */ 420 if (rpipe->pipe_buffer.cnt > 0) { 421 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out; 422 if (size > rpipe->pipe_buffer.cnt) 423 size = rpipe->pipe_buffer.cnt; 424 if (size > (u_int) uio->uio_resid) 425 size = (u_int) uio->uio_resid; 426 427 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out], 428 size, uio); 429 if (error) 430 break; 431 432 rpipe->pipe_buffer.out += size; 433 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size) 434 rpipe->pipe_buffer.out = 0; 435 436 rpipe->pipe_buffer.cnt -= size; 437 438 /* 439 * If there is no more to read in the pipe, reset 440 * its pointers to the beginning. This improves 441 * cache hit stats. 442 */ 443 if (rpipe->pipe_buffer.cnt == 0) { 444 rpipe->pipe_buffer.in = 0; 445 rpipe->pipe_buffer.out = 0; 446 } 447 nread += size; 448 #ifndef PIPE_NODIRECT 449 /* 450 * Direct copy, bypassing a kernel buffer. 451 */ 452 } else if ((size = rpipe->pipe_map.cnt) && 453 (rpipe->pipe_state & PIPE_DIRECTW)) { 454 caddr_t va; 455 if (size > (u_int) uio->uio_resid) 456 size = (u_int) uio->uio_resid; 457 458 va = (caddr_t) rpipe->pipe_map.kva + 459 rpipe->pipe_map.pos; 460 error = uiomove(va, size, uio); 461 if (error) 462 break; 463 nread += size; 464 rpipe->pipe_map.pos += size; 465 rpipe->pipe_map.cnt -= size; 466 if (rpipe->pipe_map.cnt == 0) { 467 rpipe->pipe_state &= ~PIPE_DIRECTW; 468 wakeup(rpipe); 469 } 470 #endif 471 } else { 472 /* 473 * detect EOF condition 474 * read returns 0 on EOF, no need to set error 475 */ 476 if (rpipe->pipe_state & PIPE_EOF) 477 break; 478 479 /* 480 * If the "write-side" has been blocked, wake it up now. 481 */ 482 if (rpipe->pipe_state & PIPE_WANTW) { 483 rpipe->pipe_state &= ~PIPE_WANTW; 484 wakeup(rpipe); 485 } 486 487 /* 488 * Break if some data was read. 489 */ 490 if (nread > 0) 491 break; 492 493 /* 494 * Unlock the pipe buffer for our remaining processing. We 495 * will either break out with an error or we will sleep and 496 * relock to loop. 497 */ 498 pipeunlock(rpipe); 499 500 /* 501 * Handle non-blocking mode operation or 502 * wait for more data. 503 */ 504 FILE_LOCK(fp); 505 if (fp->f_flag & FNONBLOCK) { 506 FILE_UNLOCK(fp); 507 error = EAGAIN; 508 } else { 509 FILE_UNLOCK(fp); 510 rpipe->pipe_state |= PIPE_WANTR; 511 if ((error = tsleep(rpipe, PRIBIO | PCATCH, 512 "piperd", 0)) == 0) 513 error = pipelock(rpipe, 1); 514 } 515 if (error) 516 goto unlocked_error; 517 } 518 } 519 pipeunlock(rpipe); 520 521 if (error == 0) 522 vfs_timestamp(&rpipe->pipe_atime); 523 unlocked_error: 524 --rpipe->pipe_busy; 525 526 /* 527 * PIPE_WANT processing only makes sense if pipe_busy is 0. 528 */ 529 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) { 530 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW); 531 wakeup(rpipe); 532 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) { 533 /* 534 * Handle write blocking hysteresis. 535 */ 536 if (rpipe->pipe_state & PIPE_WANTW) { 537 rpipe->pipe_state &= ~PIPE_WANTW; 538 wakeup(rpipe); 539 } 540 } 541 542 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF) 543 pipeselwakeup(rpipe); 544 545 return (error); 546 } 547 548 #ifndef PIPE_NODIRECT 549 /* 550 * Map the sending processes' buffer into kernel space and wire it. 551 * This is similar to a physical write operation. 552 */ 553 static int 554 pipe_build_write_buffer(wpipe, uio) 555 struct pipe *wpipe; 556 struct uio *uio; 557 { 558 u_int size; 559 int i; 560 vm_offset_t addr, endaddr, paddr; 561 562 GIANT_REQUIRED; 563 564 size = (u_int) uio->uio_iov->iov_len; 565 if (size > wpipe->pipe_buffer.size) 566 size = wpipe->pipe_buffer.size; 567 568 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size); 569 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base); 570 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) { 571 vm_page_t m; 572 573 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 || 574 (paddr = pmap_kextract(addr)) == 0) { 575 int j; 576 577 for (j = 0; j < i; j++) 578 vm_page_unwire(wpipe->pipe_map.ms[j], 1); 579 return (EFAULT); 580 } 581 582 m = PHYS_TO_VM_PAGE(paddr); 583 vm_page_wire(m); 584 wpipe->pipe_map.ms[i] = m; 585 } 586 587 /* 588 * set up the control block 589 */ 590 wpipe->pipe_map.npages = i; 591 wpipe->pipe_map.pos = 592 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK; 593 wpipe->pipe_map.cnt = size; 594 595 /* 596 * and map the buffer 597 */ 598 if (wpipe->pipe_map.kva == 0) { 599 /* 600 * We need to allocate space for an extra page because the 601 * address range might (will) span pages at times. 602 */ 603 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map, 604 wpipe->pipe_buffer.size + PAGE_SIZE); 605 amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE; 606 } 607 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms, 608 wpipe->pipe_map.npages); 609 610 /* 611 * and update the uio data 612 */ 613 614 uio->uio_iov->iov_len -= size; 615 uio->uio_iov->iov_base += size; 616 if (uio->uio_iov->iov_len == 0) 617 uio->uio_iov++; 618 uio->uio_resid -= size; 619 uio->uio_offset += size; 620 return (0); 621 } 622 623 /* 624 * unmap and unwire the process buffer 625 */ 626 static void 627 pipe_destroy_write_buffer(wpipe) 628 struct pipe *wpipe; 629 { 630 int i; 631 632 GIANT_REQUIRED; 633 634 if (wpipe->pipe_map.kva) { 635 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages); 636 637 if (amountpipekva > MAXPIPEKVA) { 638 vm_offset_t kva = wpipe->pipe_map.kva; 639 wpipe->pipe_map.kva = 0; 640 kmem_free(kernel_map, kva, 641 wpipe->pipe_buffer.size + PAGE_SIZE); 642 amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE; 643 } 644 } 645 for (i = 0; i < wpipe->pipe_map.npages; i++) 646 vm_page_unwire(wpipe->pipe_map.ms[i], 1); 647 } 648 649 /* 650 * In the case of a signal, the writing process might go away. This 651 * code copies the data into the circular buffer so that the source 652 * pages can be freed without loss of data. 653 */ 654 static void 655 pipe_clone_write_buffer(wpipe) 656 struct pipe *wpipe; 657 { 658 int size; 659 int pos; 660 661 size = wpipe->pipe_map.cnt; 662 pos = wpipe->pipe_map.pos; 663 bcopy((caddr_t) wpipe->pipe_map.kva + pos, 664 (caddr_t) wpipe->pipe_buffer.buffer, size); 665 666 wpipe->pipe_buffer.in = size; 667 wpipe->pipe_buffer.out = 0; 668 wpipe->pipe_buffer.cnt = size; 669 wpipe->pipe_state &= ~PIPE_DIRECTW; 670 671 pipe_destroy_write_buffer(wpipe); 672 } 673 674 /* 675 * This implements the pipe buffer write mechanism. Note that only 676 * a direct write OR a normal pipe write can be pending at any given time. 677 * If there are any characters in the pipe buffer, the direct write will 678 * be deferred until the receiving process grabs all of the bytes from 679 * the pipe buffer. Then the direct mapping write is set-up. 680 */ 681 static int 682 pipe_direct_write(wpipe, uio) 683 struct pipe *wpipe; 684 struct uio *uio; 685 { 686 int error; 687 688 retry: 689 while (wpipe->pipe_state & PIPE_DIRECTW) { 690 if (wpipe->pipe_state & PIPE_WANTR) { 691 wpipe->pipe_state &= ~PIPE_WANTR; 692 wakeup(wpipe); 693 } 694 wpipe->pipe_state |= PIPE_WANTW; 695 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0); 696 if (error) 697 goto error1; 698 if (wpipe->pipe_state & PIPE_EOF) { 699 error = EPIPE; 700 goto error1; 701 } 702 } 703 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */ 704 if (wpipe->pipe_buffer.cnt > 0) { 705 if (wpipe->pipe_state & PIPE_WANTR) { 706 wpipe->pipe_state &= ~PIPE_WANTR; 707 wakeup(wpipe); 708 } 709 710 wpipe->pipe_state |= PIPE_WANTW; 711 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0); 712 if (error) 713 goto error1; 714 if (wpipe->pipe_state & PIPE_EOF) { 715 error = EPIPE; 716 goto error1; 717 } 718 goto retry; 719 } 720 721 wpipe->pipe_state |= PIPE_DIRECTW; 722 723 error = pipe_build_write_buffer(wpipe, uio); 724 if (error) { 725 wpipe->pipe_state &= ~PIPE_DIRECTW; 726 goto error1; 727 } 728 729 error = 0; 730 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) { 731 if (wpipe->pipe_state & PIPE_EOF) { 732 pipelock(wpipe, 0); 733 pipe_destroy_write_buffer(wpipe); 734 pipeunlock(wpipe); 735 pipeselwakeup(wpipe); 736 error = EPIPE; 737 goto error1; 738 } 739 if (wpipe->pipe_state & PIPE_WANTR) { 740 wpipe->pipe_state &= ~PIPE_WANTR; 741 wakeup(wpipe); 742 } 743 pipeselwakeup(wpipe); 744 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0); 745 } 746 747 pipelock(wpipe,0); 748 if (wpipe->pipe_state & PIPE_DIRECTW) { 749 /* 750 * this bit of trickery substitutes a kernel buffer for 751 * the process that might be going away. 752 */ 753 pipe_clone_write_buffer(wpipe); 754 } else { 755 pipe_destroy_write_buffer(wpipe); 756 } 757 pipeunlock(wpipe); 758 return (error); 759 760 error1: 761 wakeup(wpipe); 762 return (error); 763 } 764 #endif 765 766 static int 767 pipe_write(fp, uio, cred, flags, td) 768 struct file *fp; 769 struct uio *uio; 770 struct ucred *cred; 771 struct thread *td; 772 int flags; 773 { 774 int error = 0; 775 int orig_resid; 776 struct pipe *wpipe, *rpipe; 777 778 rpipe = (struct pipe *) fp->f_data; 779 wpipe = rpipe->pipe_peer; 780 781 /* 782 * detect loss of pipe read side, issue SIGPIPE if lost. 783 */ 784 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) { 785 return (EPIPE); 786 } 787 ++wpipe->pipe_busy; 788 789 /* 790 * If it is advantageous to resize the pipe buffer, do 791 * so. 792 */ 793 if ((uio->uio_resid > PIPE_SIZE) && 794 (nbigpipe < LIMITBIGPIPES) && 795 (wpipe->pipe_state & PIPE_DIRECTW) == 0 && 796 (wpipe->pipe_buffer.size <= PIPE_SIZE) && 797 (wpipe->pipe_buffer.cnt == 0)) { 798 799 if ((error = pipelock(wpipe,1)) == 0) { 800 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0) 801 nbigpipe++; 802 pipeunlock(wpipe); 803 } 804 } 805 806 /* 807 * If an early error occured unbusy and return, waking up any pending 808 * readers. 809 */ 810 if (error) { 811 --wpipe->pipe_busy; 812 if ((wpipe->pipe_busy == 0) && 813 (wpipe->pipe_state & PIPE_WANT)) { 814 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); 815 wakeup(wpipe); 816 } 817 return(error); 818 } 819 820 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone")); 821 822 orig_resid = uio->uio_resid; 823 824 while (uio->uio_resid) { 825 int space; 826 827 #ifndef PIPE_NODIRECT 828 /* 829 * If the transfer is large, we can gain performance if 830 * we do process-to-process copies directly. 831 * If the write is non-blocking, we don't use the 832 * direct write mechanism. 833 * 834 * The direct write mechanism will detect the reader going 835 * away on us. 836 */ 837 FILE_LOCK(fp); 838 if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) && 839 (fp->f_flag & FNONBLOCK) == 0 && 840 (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA)) && 841 (uio->uio_iov->iov_len >= PIPE_MINDIRECT)) { 842 FILE_UNLOCK(fp); 843 error = pipe_direct_write( wpipe, uio); 844 if (error) 845 break; 846 continue; 847 } else 848 FILE_UNLOCK(fp); 849 #endif 850 851 /* 852 * Pipe buffered writes cannot be coincidental with 853 * direct writes. We wait until the currently executing 854 * direct write is completed before we start filling the 855 * pipe buffer. We break out if a signal occurs or the 856 * reader goes away. 857 */ 858 retrywrite: 859 while (wpipe->pipe_state & PIPE_DIRECTW) { 860 if (wpipe->pipe_state & PIPE_WANTR) { 861 wpipe->pipe_state &= ~PIPE_WANTR; 862 wakeup(wpipe); 863 } 864 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0); 865 if (wpipe->pipe_state & PIPE_EOF) 866 break; 867 if (error) 868 break; 869 } 870 if (wpipe->pipe_state & PIPE_EOF) { 871 error = EPIPE; 872 break; 873 } 874 875 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt; 876 877 /* Writes of size <= PIPE_BUF must be atomic. */ 878 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF)) 879 space = 0; 880 881 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) { 882 if ((error = pipelock(wpipe,1)) == 0) { 883 int size; /* Transfer size */ 884 int segsize; /* first segment to transfer */ 885 886 /* 887 * It is possible for a direct write to 888 * slip in on us... handle it here... 889 */ 890 if (wpipe->pipe_state & PIPE_DIRECTW) { 891 pipeunlock(wpipe); 892 goto retrywrite; 893 } 894 /* 895 * If a process blocked in uiomove, our 896 * value for space might be bad. 897 * 898 * XXX will we be ok if the reader has gone 899 * away here? 900 */ 901 if (space > wpipe->pipe_buffer.size - 902 wpipe->pipe_buffer.cnt) { 903 pipeunlock(wpipe); 904 goto retrywrite; 905 } 906 907 /* 908 * Transfer size is minimum of uio transfer 909 * and free space in pipe buffer. 910 */ 911 if (space > uio->uio_resid) 912 size = uio->uio_resid; 913 else 914 size = space; 915 /* 916 * First segment to transfer is minimum of 917 * transfer size and contiguous space in 918 * pipe buffer. If first segment to transfer 919 * is less than the transfer size, we've got 920 * a wraparound in the buffer. 921 */ 922 segsize = wpipe->pipe_buffer.size - 923 wpipe->pipe_buffer.in; 924 if (segsize > size) 925 segsize = size; 926 927 /* Transfer first segment */ 928 929 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], 930 segsize, uio); 931 932 if (error == 0 && segsize < size) { 933 /* 934 * Transfer remaining part now, to 935 * support atomic writes. Wraparound 936 * happened. 937 */ 938 if (wpipe->pipe_buffer.in + segsize != 939 wpipe->pipe_buffer.size) 940 panic("Expected pipe buffer wraparound disappeared"); 941 942 error = uiomove(&wpipe->pipe_buffer.buffer[0], 943 size - segsize, uio); 944 } 945 if (error == 0) { 946 wpipe->pipe_buffer.in += size; 947 if (wpipe->pipe_buffer.in >= 948 wpipe->pipe_buffer.size) { 949 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size) 950 panic("Expected wraparound bad"); 951 wpipe->pipe_buffer.in = size - segsize; 952 } 953 954 wpipe->pipe_buffer.cnt += size; 955 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size) 956 panic("Pipe buffer overflow"); 957 958 } 959 pipeunlock(wpipe); 960 } 961 if (error) 962 break; 963 964 } else { 965 /* 966 * If the "read-side" has been blocked, wake it up now. 967 */ 968 if (wpipe->pipe_state & PIPE_WANTR) { 969 wpipe->pipe_state &= ~PIPE_WANTR; 970 wakeup(wpipe); 971 } 972 973 /* 974 * don't block on non-blocking I/O 975 */ 976 FILE_LOCK(fp); 977 if (fp->f_flag & FNONBLOCK) { 978 FILE_UNLOCK(fp); 979 error = EAGAIN; 980 break; 981 } 982 FILE_UNLOCK(fp); 983 984 /* 985 * We have no more space and have something to offer, 986 * wake up select/poll. 987 */ 988 pipeselwakeup(wpipe); 989 990 wpipe->pipe_state |= PIPE_WANTW; 991 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0); 992 if (error != 0) 993 break; 994 /* 995 * If read side wants to go away, we just issue a signal 996 * to ourselves. 997 */ 998 if (wpipe->pipe_state & PIPE_EOF) { 999 error = EPIPE; 1000 break; 1001 } 1002 } 1003 } 1004 1005 --wpipe->pipe_busy; 1006 1007 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) { 1008 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); 1009 wakeup(wpipe); 1010 } else if (wpipe->pipe_buffer.cnt > 0) { 1011 /* 1012 * If we have put any characters in the buffer, we wake up 1013 * the reader. 1014 */ 1015 if (wpipe->pipe_state & PIPE_WANTR) { 1016 wpipe->pipe_state &= ~PIPE_WANTR; 1017 wakeup(wpipe); 1018 } 1019 } 1020 1021 /* 1022 * Don't return EPIPE if I/O was successful 1023 */ 1024 if ((wpipe->pipe_buffer.cnt == 0) && 1025 (uio->uio_resid == 0) && 1026 (error == EPIPE)) { 1027 error = 0; 1028 } 1029 1030 if (error == 0) 1031 vfs_timestamp(&wpipe->pipe_mtime); 1032 1033 /* 1034 * We have something to offer, 1035 * wake up select/poll. 1036 */ 1037 if (wpipe->pipe_buffer.cnt) 1038 pipeselwakeup(wpipe); 1039 1040 return (error); 1041 } 1042 1043 /* 1044 * we implement a very minimal set of ioctls for compatibility with sockets. 1045 */ 1046 int 1047 pipe_ioctl(fp, cmd, data, td) 1048 struct file *fp; 1049 u_long cmd; 1050 caddr_t data; 1051 struct thread *td; 1052 { 1053 struct pipe *mpipe = (struct pipe *)fp->f_data; 1054 1055 switch (cmd) { 1056 1057 case FIONBIO: 1058 return (0); 1059 1060 case FIOASYNC: 1061 if (*(int *)data) { 1062 mpipe->pipe_state |= PIPE_ASYNC; 1063 } else { 1064 mpipe->pipe_state &= ~PIPE_ASYNC; 1065 } 1066 return (0); 1067 1068 case FIONREAD: 1069 if (mpipe->pipe_state & PIPE_DIRECTW) 1070 *(int *)data = mpipe->pipe_map.cnt; 1071 else 1072 *(int *)data = mpipe->pipe_buffer.cnt; 1073 return (0); 1074 1075 case FIOSETOWN: 1076 return (fsetown(*(int *)data, &mpipe->pipe_sigio)); 1077 1078 case FIOGETOWN: 1079 *(int *)data = fgetown(mpipe->pipe_sigio); 1080 return (0); 1081 1082 /* This is deprecated, FIOSETOWN should be used instead. */ 1083 case TIOCSPGRP: 1084 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio)); 1085 1086 /* This is deprecated, FIOGETOWN should be used instead. */ 1087 case TIOCGPGRP: 1088 *(int *)data = -fgetown(mpipe->pipe_sigio); 1089 return (0); 1090 1091 } 1092 return (ENOTTY); 1093 } 1094 1095 int 1096 pipe_poll(fp, events, cred, td) 1097 struct file *fp; 1098 int events; 1099 struct ucred *cred; 1100 struct thread *td; 1101 { 1102 struct pipe *rpipe = (struct pipe *)fp->f_data; 1103 struct pipe *wpipe; 1104 int revents = 0; 1105 1106 wpipe = rpipe->pipe_peer; 1107 if (events & (POLLIN | POLLRDNORM)) 1108 if ((rpipe->pipe_state & PIPE_DIRECTW) || 1109 (rpipe->pipe_buffer.cnt > 0) || 1110 (rpipe->pipe_state & PIPE_EOF)) 1111 revents |= events & (POLLIN | POLLRDNORM); 1112 1113 if (events & (POLLOUT | POLLWRNORM)) 1114 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) || 1115 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) && 1116 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) 1117 revents |= events & (POLLOUT | POLLWRNORM); 1118 1119 if ((rpipe->pipe_state & PIPE_EOF) || 1120 (wpipe == NULL) || 1121 (wpipe->pipe_state & PIPE_EOF)) 1122 revents |= POLLHUP; 1123 1124 if (revents == 0) { 1125 if (events & (POLLIN | POLLRDNORM)) { 1126 selrecord(td, &rpipe->pipe_sel); 1127 rpipe->pipe_state |= PIPE_SEL; 1128 } 1129 1130 if (events & (POLLOUT | POLLWRNORM)) { 1131 selrecord(td, &wpipe->pipe_sel); 1132 wpipe->pipe_state |= PIPE_SEL; 1133 } 1134 } 1135 1136 return (revents); 1137 } 1138 1139 static int 1140 pipe_stat(fp, ub, td) 1141 struct file *fp; 1142 struct stat *ub; 1143 struct thread *td; 1144 { 1145 struct pipe *pipe = (struct pipe *)fp->f_data; 1146 1147 bzero((caddr_t)ub, sizeof(*ub)); 1148 ub->st_mode = S_IFIFO; 1149 ub->st_blksize = pipe->pipe_buffer.size; 1150 ub->st_size = pipe->pipe_buffer.cnt; 1151 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize; 1152 ub->st_atimespec = pipe->pipe_atime; 1153 ub->st_mtimespec = pipe->pipe_mtime; 1154 ub->st_ctimespec = pipe->pipe_ctime; 1155 ub->st_uid = fp->f_cred->cr_uid; 1156 ub->st_gid = fp->f_cred->cr_gid; 1157 /* 1158 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen. 1159 * XXX (st_dev, st_ino) should be unique. 1160 */ 1161 return (0); 1162 } 1163 1164 /* ARGSUSED */ 1165 static int 1166 pipe_close(fp, td) 1167 struct file *fp; 1168 struct thread *td; 1169 { 1170 struct pipe *cpipe = (struct pipe *)fp->f_data; 1171 1172 fp->f_ops = &badfileops; 1173 fp->f_data = NULL; 1174 funsetown(cpipe->pipe_sigio); 1175 pipeclose(cpipe); 1176 return (0); 1177 } 1178 1179 static void 1180 pipe_free_kmem(cpipe) 1181 struct pipe *cpipe; 1182 { 1183 GIANT_REQUIRED; 1184 1185 if (cpipe->pipe_buffer.buffer != NULL) { 1186 if (cpipe->pipe_buffer.size > PIPE_SIZE) 1187 --nbigpipe; 1188 amountpipekva -= cpipe->pipe_buffer.size; 1189 kmem_free(kernel_map, 1190 (vm_offset_t)cpipe->pipe_buffer.buffer, 1191 cpipe->pipe_buffer.size); 1192 cpipe->pipe_buffer.buffer = NULL; 1193 } 1194 #ifndef PIPE_NODIRECT 1195 if (cpipe->pipe_map.kva != NULL) { 1196 amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE; 1197 kmem_free(kernel_map, 1198 cpipe->pipe_map.kva, 1199 cpipe->pipe_buffer.size + PAGE_SIZE); 1200 cpipe->pipe_map.cnt = 0; 1201 cpipe->pipe_map.kva = 0; 1202 cpipe->pipe_map.pos = 0; 1203 cpipe->pipe_map.npages = 0; 1204 } 1205 #endif 1206 } 1207 1208 /* 1209 * shutdown the pipe 1210 */ 1211 static void 1212 pipeclose(cpipe) 1213 struct pipe *cpipe; 1214 { 1215 struct pipe *ppipe; 1216 1217 if (cpipe) { 1218 1219 pipeselwakeup(cpipe); 1220 1221 /* 1222 * If the other side is blocked, wake it up saying that 1223 * we want to close it down. 1224 */ 1225 while (cpipe->pipe_busy) { 1226 wakeup(cpipe); 1227 cpipe->pipe_state |= PIPE_WANT | PIPE_EOF; 1228 tsleep(cpipe, PRIBIO, "pipecl", 0); 1229 } 1230 1231 /* 1232 * Disconnect from peer 1233 */ 1234 if ((ppipe = cpipe->pipe_peer) != NULL) { 1235 pipeselwakeup(ppipe); 1236 1237 ppipe->pipe_state |= PIPE_EOF; 1238 wakeup(ppipe); 1239 KNOTE(&ppipe->pipe_sel.si_note, 0); 1240 ppipe->pipe_peer = NULL; 1241 } 1242 /* 1243 * free resources 1244 */ 1245 pipe_free_kmem(cpipe); 1246 zfree(pipe_zone, cpipe); 1247 } 1248 } 1249 1250 /*ARGSUSED*/ 1251 static int 1252 pipe_kqfilter(struct file *fp, struct knote *kn) 1253 { 1254 struct pipe *cpipe; 1255 1256 cpipe = (struct pipe *)kn->kn_fp->f_data; 1257 switch (kn->kn_filter) { 1258 case EVFILT_READ: 1259 kn->kn_fop = &pipe_rfiltops; 1260 break; 1261 case EVFILT_WRITE: 1262 kn->kn_fop = &pipe_wfiltops; 1263 cpipe = cpipe->pipe_peer; 1264 break; 1265 default: 1266 return (1); 1267 } 1268 kn->kn_hook = (caddr_t)cpipe; 1269 1270 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext); 1271 return (0); 1272 } 1273 1274 static void 1275 filt_pipedetach(struct knote *kn) 1276 { 1277 struct pipe *cpipe = (struct pipe *)kn->kn_hook; 1278 1279 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext); 1280 } 1281 1282 /*ARGSUSED*/ 1283 static int 1284 filt_piperead(struct knote *kn, long hint) 1285 { 1286 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data; 1287 struct pipe *wpipe = rpipe->pipe_peer; 1288 1289 kn->kn_data = rpipe->pipe_buffer.cnt; 1290 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW)) 1291 kn->kn_data = rpipe->pipe_map.cnt; 1292 1293 if ((rpipe->pipe_state & PIPE_EOF) || 1294 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) { 1295 kn->kn_flags |= EV_EOF; 1296 return (1); 1297 } 1298 return (kn->kn_data > 0); 1299 } 1300 1301 /*ARGSUSED*/ 1302 static int 1303 filt_pipewrite(struct knote *kn, long hint) 1304 { 1305 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data; 1306 struct pipe *wpipe = rpipe->pipe_peer; 1307 1308 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) { 1309 kn->kn_data = 0; 1310 kn->kn_flags |= EV_EOF; 1311 return (1); 1312 } 1313 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt; 1314 if (wpipe->pipe_state & PIPE_DIRECTW) 1315 kn->kn_data = 0; 1316 1317 return (kn->kn_data >= PIPE_BUF); 1318 } 1319