/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD$
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */
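
/*
 * Illustrative sketch, not part of the build: the mode-selection rule
 * described above, restated as a predicate.  The authoritative test is
 * in pipe_write() below, which will also take the direct path when a
 * kva mapping (pipe_map.kva) is already allocated even if amountpipekva
 * has exceeded LIMITPIPEKVA.  The function name is hypothetical.
 *
 *	static int
 *	would_use_direct_write(size_t iov_len, int fflag, int pipekva)
 *	{
 *		// Non-blocking writes never use the direct mechanism.
 *		if (fflag & FNONBLOCK)
 *			return (0);
 *		// Small writes go through the kernel buffer instead.
 *		if (iov_len < PIPE_MINDIRECT)
 *			return (0);
 *		// Respect the soft cap on kva used for direct transfers.
 *		if (pipekva >= LIMITPIPEKVA)
 *			return (0);
 *		return (1);
 *	}
 */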

/*
 * interfaces to the outside world
 */
static int pipe_read(struct file *fp, struct uio *uio,
	    struct ucred *active_cred, int flags, struct thread *td);
static int pipe_write(struct file *fp, struct uio *uio,
	    struct ucred *active_cred, int flags, struct thread *td);
static int pipe_close(struct file *fp, struct thread *td);
static int pipe_poll(struct file *fp, int events, struct ucred *active_cred,
	    struct thread *td);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb,
	    struct ucred *active_cred, struct thread *td);
static int pipe_ioctl(struct file *fp, u_long cmd, void *data,
	    struct ucred *active_cred, struct thread *td);

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0,	\
		    ("%s:%d PIPE_GET_GIANT: pipe not locked",		\
		    __FILE__, __LINE__));				\
		PIPE_UNLOCK(pipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(pipe);					\
	} while (0)

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{
	pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct mtx *pmtx;
	int fd, error;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
#ifdef MAC
	/*
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result, mac_init_pipe() and
	 * mac_create_pipe() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_init_pipe(rpipe);
	mac_create_pipe(td->td_ucred, rpipe);
#endif
	mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);

	return (0);
}
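
/*
 * Usage sketch (userland, not compiled here): the two descriptors
 * returned in td_retval[0] and td_retval[1] above surface as fildes[0]
 * (read side) and fildes[1] (write side) of pipe(2).
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fildes[2];
 *		char buf[6];
 *
 *		if (pipe(fildes) == -1)
 *			return (1);
 *		write(fildes[1], "hello", 5);	// write side
 *		read(fildes[0], buf, 5);	// read side
 *		buf[5] = '\0';
 *		printf("%s\n", buf);
 *		close(fildes[0]);
 *		close(fildes[1]);
 *		return (0);
 *	}
 */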

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipespace: pipe mutex locked"));

	npages = round_page(size) / PAGE_SIZE;
	/*
	 * Create an object; I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
	    (vm_offset_t *) &buffer, size, 1,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = uma_zalloc(pipe_zone, M_WAITOK);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = 0;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}
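
/*
 * Illustrative sketch, not part of the build: the order in which the
 * primitives above and the Giant macros nest.  PIPE_GET_GIANT() asserts
 * that the long-term I/O lock (PIPE_LOCKFL, taken by pipelock()) is
 * held, because it drops the pipe mutex around Giant-protected VM work.
 *
 *	PIPE_LOCK(cpipe);		// short-term pipe mutex
 *	error = pipelock(cpipe, 1);	// long-term I/O lock; may sleep
 *	if (error == 0) {
 *		PIPE_GET_GIANT(cpipe);	// drops pipe mutex, takes Giant
 *		// ... VM work such as pipespace() ...
 *		PIPE_DROP_GIANT(cpipe);	// drops Giant, retakes pipe mutex
 *		pipeunlock(cpipe);
 *	}
 *	PIPE_UNLOCK(cpipe);
 */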

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_check_pipe_read(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining
			 * processing.  We will either break out with an
			 * error or we will sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queues() and vm_page_unlock_queues()
		 * should not be performed outside of this loop.
		 */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
		    addr)) == 0) {
			int j;

			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			vm_page_unlock_queues();
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
	    wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	pipelock(wpipe, 0);
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif
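
/*
 * Usage sketch (userland, not compiled here): a blocking write whose
 * iov is at least PIPE_MINDIRECT bytes is eligible for the direct path
 * above; whether it is actually taken also depends on the kva limits
 * checked in pipe_write().  BIGWRITE is a hypothetical constant.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	#define BIGWRITE (128 * 1024)	// comfortably above PIPE_MINDIRECT
 *
 *	int
 *	main(void)
 *	{
 *		int fildes[2];
 *		char sink[8192], *p;
 *
 *		if (pipe(fildes) == -1)
 *			return (1);
 *		if (fork() == 0) {		// child: drain the pipe
 *			close(fildes[1]);
 *			while (read(fildes[0], sink, sizeof(sink)) > 0)
 *				;
 *			_exit(0);
 *		}
 *		close(fildes[0]);
 *		p = malloc(BIGWRITE);
 *		memset(p, 'x', BIGWRITE);
 *		write(fildes[1], p, BIGWRITE);	// blocking, direct-eligible
 *		close(fildes[1]);
 *		return (0);
 *	}
 */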

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_check_pipe_write(active_cred, wpipe);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(wpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				PIPE_UNLOCK(rpipe);
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				    segsize, uio);
				PIPE_LOCK(rpipe);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					PIPE_UNLOCK(rpipe);
					error = uiomove(
					    &wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
					PIPE_LOCK(rpipe);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in !=
						    size - segsize +
						    wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in =
						    size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt >
					    wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
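
/*
 * Illustrative sketch, not part of the build: the two-segment copy that
 * pipe_write() performs on wraparound, extracted into a minimal ring
 * buffer with the same in/out/cnt roles as struct pipe_buffer.  The
 * caller must ensure size <= RBSIZE - r->cnt, just as the space checks
 * above do.  Names are hypothetical.
 *
 *	#include <string.h>
 *
 *	#define RBSIZE	16
 *
 *	struct ring {
 *		char	buf[RBSIZE];
 *		int	in, out, cnt;
 *	};
 *
 *	static void
 *	ring_put(struct ring *r, const char *src, int size)
 *	{
 *		int segsize;
 *
 *		// First segment: contiguous space before the wrap point.
 *		segsize = RBSIZE - r->in;
 *		if (segsize > size)
 *			segsize = size;
 *		memcpy(&r->buf[r->in], src, segsize);
 *		// Second segment: the remainder lands at the start.
 *		if (segsize < size)
 *			memcpy(&r->buf[0], src + segsize, size - segsize);
 *		r->in = (r->in + size) % RBSIZE;
 *		r->cnt += size;
 *	}
 */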

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;
#ifdef MAC
	int error;

	/* XXXMAC: Pipe should be locked for this check. */
	error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
	if (error)
		return (error);
#endif

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		PIPE_LOCK(mpipe);
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		PIPE_LOCK(mpipe);
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);

	}
	return (ENOTTY);
}

int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_check_pipe_poll(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}
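
/*
 * Usage sketch (userland, not compiled here): how the poll semantics
 * implemented above look from user code.  POLLIN fires on buffered
 * data, a pending direct write, or EOF; POLLHUP is reported once the
 * peer has gone away.  wait_readable() is a hypothetical helper.
 *
 *	#include <poll.h>
 *
 *	int
 *	wait_readable(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd;
 *		int n;
 *
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		n = poll(&pfd, 1, timeout_ms);
 *		if (n <= 0)
 *			return (n);	// 0 = timeout, -1 = error
 *		if (pfd.revents & POLLHUP)
 *			return (-1);	// writer closed; read() sees EOF
 *		return (1);
 *	}
 */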

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;
#ifdef MAC
	int error;

	/* XXXMAC: Pipe should be locked for this check. */
	error = mac_check_pipe_stat(active_cred, pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		kmem_free(kernel_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
		    cpipe->pipe_map.kva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shut down the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
	int hadpeer;

	if (cpipe == NULL)
		return;

	hadpeer = 0;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
	}

#ifdef MAC
	if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
		mac_destroy_pipe(cpipe);
#endif

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		hadpeer++;
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}
	/*
	 * free resources
	 */
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	uma_zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EBADF);
		break;
	default:
		return (1);
	}
	kn->kn_hook = cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}