/* xref: /freebsd/sys/kern/sys_pipe.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf) */
/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD$
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the write is smaller than PIPE_MINDIRECT, then
 * "normal" pipe buffering is done.  If the write is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the writer's buffer is fully mapped and wired into
 * the kernel, and the receiving process can copy the data directly from the
 * pages of the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
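
/*
 * For orientation only, a minimal userland sketch (not part of this
 * file, excluded from compilation) showing that the mode above is
 * selected purely by the size passed to write(2).  The 512/8192
 * figures assume the PIPE_MINDIRECT value of 8192 from <sys/pipe.h>
 * at this time and a reader draining the other end.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

static void
demo(int wfd)
{
	char small[512], large[8192];

	(void)write(wfd, small, sizeof(small));	/* buffered: 512 < PIPE_MINDIRECT */
	(void)write(wfd, large, sizeof(large));	/* direct: 8192 >= PIPE_MINDIRECT */
}
#endif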

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static fo_rdwr_t	pipe_read;
static fo_rdwr_t	pipe_write;
static fo_ioctl_t	pipe_ioctl;
static fo_poll_t	pipe_poll;
static fo_kqfilter_t	pipe_kqfilter;
static fo_stat_t	pipe_stat;
static fo_close_t	pipe_close;

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close, DFLAG_PASSABLE
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0,	\
		    ("%s:%d PIPE_GET_GIANT: pipe not locked",		\
		     __FILE__, __LINE__));				\
		PIPE_UNLOCK(pipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(pipe);					\
	} while (0)
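
/*
 * Intended pairing, as used by pipe_direct_write() below: the caller
 * holds the pipe I/O lock (PIPE_LOCKFL) so the pipe cannot change
 * underneath it while the pipe mutex is dropped around Giant-protected
 * VM work:
 *
 *	pipelock(wpipe, 0);
 *	PIPE_GET_GIANT(wpipe);		(drop pipe mutex, acquire Giant)
 *	... Giant-protected VM work ...
 *	PIPE_DROP_GIANT(wpipe);		(release Giant, reacquire pipe mutex)
 *	pipeunlock(wpipe);
 */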

/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
 * is there so that on large systems, we don't exhaust it.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers; we cannot, of course, limit
 * the amount of kva for pipes in general though.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;
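
/*
 * Back-of-the-envelope check (assuming BIG_PIPE_SIZE is 64KB, per
 * <sys/pipe.h>): LIMITBIGPIPES big pipes consume at most
 * 32 * 64KB = 2MB of buffer kva, a quarter of the 8MB MAXPIPEKVA
 * soft limit, leaving the rest for ordinary PIPE_SIZE pipes and
 * direct-write mappings.
 */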

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{
	pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct mtx *pmtx;
	int fd, error;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
#ifdef MAC
	/*
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_init_pipe() and
	 * mac_create_pipe() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_init_pipe(rpipe);
	mac_create_pipe(td->td_ucred, rpipe);
#endif
	mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);

	return (0);
}
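
/*
 * Minimal userland sketch (not part of this file, excluded from
 * compilation) of the resulting descriptors: retval[0] is the read
 * side and retval[1] the write side, matching pipe(2).
 */
#if 0
#include <unistd.h>

static void
demo(void)
{
	int fd[2];
	char buf[5];

	if (pipe(fd) == 0) {
		(void)write(fd[1], "ping", 5);	/* write end: fd[1] */
		(void)read(fd[0], buf, 5);	/* read end: fd[0] */
		(void)close(fd[0]);
		(void)close(fd[1]);
	}
}
#endif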

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = uma_zalloc(pipe_zone, M_WAITOK);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = 0;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_check_pipe_read(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining
			 * processing.  We will either break out with an
			 * error or we will sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queue() and vm_page_unlock_queue()
		 * should not be performed outside of this loop.
		 */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
		     addr)) == 0) {
			int j;

			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			vm_page_unlock_queues();
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		wpipe->pipe_map.ms[i] = m;
	}

/*
 * set up the control block
 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

/*
 * and map the buffer
 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		atomic_add_int(&amountpipekva,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

/*
 * and update the uio data
 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}
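
/*
 * Worked example (hypothetical addresses, assuming 4KB pages): for
 * iov_base 0x800a1234 and size 0x3000, trunc_page() yields 0x800a1000
 * and round_page() of the end yields 0x800a5000, so four pages are
 * wired even though only three pages' worth of data is transferred;
 * pipe_map.pos becomes 0x234 and the reader copies starting at
 * pipe_map.kva + 0x234.  This offset is why an extra page of kva is
 * allocated above.
 */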

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			atomic_subtract_int(&amountpipekva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
		}
	}
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	pipelock(wpipe, 0);
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif
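
/*
 * Summary of the PIPE_DIRECTW handshake implemented above, derived
 * from the code for orientation: the writer waits out any previous
 * direct write and any buffered data, sets PIPE_DIRECTW, wires and
 * maps its pages, then sleeps in "pipdwt".  The reader consumes
 * pipe_map.cnt bytes directly and clears PIPE_DIRECTW when done,
 * waking the writer.  If the writer is signalled first, it falls back
 * to pipe_clone_write_buffer() so no data is lost.
 */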

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_check_pipe_write(active_cred, wpipe);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
		(nbigpipe < LIMITBIGPIPES) &&
		(wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
		(wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(wpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

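		/*
		 * Example (assuming PIPE_BUF is 512): a writer issuing a
		 * 512-byte write either finds 512 free bytes or blocks with
		 * space forced to 0 above, so its payload is never
		 * interleaved with another writer's data.  A 4096-byte
		 * write, being larger than PIPE_BUF, may be split across
		 * several passes through this loop.
		 */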
		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

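				/*
				 * Worked example (hypothetical numbers,
				 * 16384-byte buffer): with in == 16000 and
				 * size == 1000, segsize is 384, so 384
				 * bytes land at the end of the buffer and
				 * the remaining 616 bytes at offset 0; in
				 * then wraps to 616 below.
				 */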
				/* Transfer first segment */

				PIPE_UNLOCK(rpipe);
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);
				PIPE_LOCK(rpipe);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					PIPE_UNLOCK(rpipe);
					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
					PIPE_LOCK(rpipe);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in !=
						    size - segsize +
						    wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If the read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = fp->f_data;
#ifdef MAC
	int error;
#endif

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	switch (cmd) {

	case FIONBIO:
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		PIPE_UNLOCK(mpipe);
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		PIPE_UNLOCK(mpipe);
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		return (0);

	}
	PIPE_UNLOCK(mpipe);
	return (ENOTTY);
}
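
/*
 * Userland sketch (not part of this file, excluded from compilation):
 * FIONREAD reports the bytes readable right now, whether they sit in
 * the kernel buffer or in a pending direct write.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/filio.h>

static int
bytes_pending(int rfd)
{
	int n = 0;

	(void)ioctl(rfd, FIONREAD, &n);
	return (n);
}
#endif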

static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_check_pipe_poll(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = fp->f_data;
#ifdef MAC
	int error;

	PIPE_LOCK(pipe);
	error = mac_check_pipe_stat(active_cred, pipe);
	PIPE_UNLOCK(pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
		atomic_subtract_int(&amountpipekva,
		    cpipe->pipe_buffer.size + PAGE_SIZE);
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shut down the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
	int hadpeer;

	if (cpipe == NULL)
		return;

	hadpeer = 0;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
	}

#ifdef MAC
	if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
		mac_destroy_pipe(cpipe);
#endif

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		hadpeer++;
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}
	/*
	 * free resources
	 */
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	uma_zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EBADF);
		break;
	default:
		return (1);
	}
	kn->kn_hook = cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}
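
/*
 * Userland sketch (not part of this file, excluded from compilation):
 * registering for readability on the read end of a pipe via kqueue(2),
 * which ends up in pipe_kqfilter()/filt_piperead() above.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_readable(int rfd)
{
	struct kevent kev;
	int kq;

	kq = kqueue();
	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, &kev, 1, NULL) == 1)
		return ((int)kev.data);	/* bytes readable, per kn_data */
	return (-1);
}
#endif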

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}