/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD$
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the amount written is less than PIPE_MINDIRECT,
 * then the "normal" pipe buffering is done.  If the write is between
 * PIPE_MINDIRECT and PIPE_SIZE in size, the writer's buffer is fully
 * mapped and wired into the kernel, and the receiving process can copy
 * the data directly from the pages of the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.  PIPE_SIZE is constrained by the
 * amount of kernel virtual memory.
 */
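
/*
 * Minimal userland sketch of the two regimes described above (assumes
 * only standard POSIX pipe(2)/write(2) semantics; the PIPE_MINDIRECT
 * cutoff is a kernel-internal detail invisible to the caller):
 */
#if 0
#include <unistd.h>
#include <string.h>

static void
example_writes(int wfd)
{
	char small[128];		/* well below PIPE_MINDIRECT: buffered
					   through the kernel pipe buffer */
	static char large[65536];	/* large enough that the kernel may
					   wire these pages and let the reader
					   copy directly from them */

	memset(small, 'a', sizeof(small));
	memset(large, 'b', sizeof(large));
	(void)write(wfd, small, sizeof(small));
	(void)write(wfd, large, sizeof(large));
}
#endif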

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static int pipe_read(struct file *fp, struct uio *uio,
		struct ucred *active_cred, int flags, struct thread *td);
static int pipe_write(struct file *fp, struct uio *uio,
		struct ucred *active_cred, int flags, struct thread *td);
static int pipe_close(struct file *fp, struct thread *td);
static int pipe_poll(struct file *fp, int events, struct ucred *active_cred,
		struct thread *td);
static int pipe_kqfilter(struct file *fp, struct knote *kn);
static int pipe_stat(struct file *fp, struct stat *sb,
		struct ucred *active_cred, struct thread *td);
static int pipe_ioctl(struct file *fp, u_long cmd, void *data,
		struct ucred *active_cred, struct thread *td);

static struct fileops pipeops = {
	pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
	pipe_stat, pipe_close
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

#define PIPE_GET_GIANT(pipe)						\
	do {								\
		KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0,	\
		    ("%s:%d PIPE_GET_GIANT: pipe not locked",		\
		     __FILE__, __LINE__));				\
		PIPE_UNLOCK(pipe);					\
		mtx_lock(&Giant);					\
	} while (0)

#define PIPE_DROP_GIANT(pipe)						\
	do {								\
		mtx_unlock(&Giant);					\
		PIPE_LOCK(pipe);					\
	} while (0)
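
/*
 * A sketch of the pattern these two macros support throughout this file
 * (see pipe_direct_write() below for a real instance): the pipe must be
 * pipelock()ed first, then Giant is taken around code that requires it,
 * and the pipe mutex is reacquired afterwards.
 */
#if 0
static void
example_giant_region(struct pipe *wpipe)
{
	PIPE_LOCK(wpipe);
	pipelock(wpipe, 0);
	PIPE_GET_GIANT(wpipe);		/* drops the pipe mutex, takes Giant */
	/* ... VM operations that require Giant ... */
	PIPE_DROP_GIANT(wpipe);		/* drops Giant, retakes the pipe mutex */
	pipeunlock(wpipe);
	PIPE_UNLOCK(wpipe);
}
#endif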

/*
 * Default pipe buffer size(s).  This can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Maximum amount of kva for pipes -- this is a soft limit, but it is
 * there so that on large systems we do not exhaust kernel virtual memory.
 */
#define MAXPIPEKVA (8*1024*1024)

/*
 * Limit for direct transfers.  We cannot, of course, limit the amount
 * of kva for pipes in general.
 */
#define LIMITPIPEKVA (16*1024*1024)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
static int nbigpipe;

static int amountpipekva;

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe **cpipep);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{
	pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */
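
/*
 * Userland view of this system call (a minimal sketch assuming only
 * standard POSIX semantics): fd[0] is opened for reading, fd[1] for
 * writing.
 */
#if 0
#include <unistd.h>

static int
example_pipe(void)
{
	int fd[2];
	char c = 'x';

	if (pipe(fd) == -1)
		return (-1);
	(void)write(fd[1], &c, 1);	/* lands in the kernel pipe buffer */
	(void)read(fd[0], &c, 1);	/* drains it again */
	close(fd[0]);
	close(fd[1]);
	return (0);
}
#endif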

/* ARGSUSED */
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	struct mtx *pmtx;
	int fd, error;

	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));

	pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	fhold(rf);
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	error = falloc(td, &wf, &fd);
	if (error) {
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else
			FILEDESC_UNLOCK(fdp);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		free(pmtx, M_TEMP);
		return (error);
	}
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	td->td_retval[1] = fd;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
#ifdef MAC
	/*
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_init_pipe() and
	 * mac_create_pipe() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_init_pipe(rpipe);
	mac_create_pipe(td->td_ucred, rpipe);
#endif
	mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
	fdrop(rf, td);

	return (0);
}


/*
 * Allocate kva for the pipe's circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails,
 * it retains the old buffer and returns ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipespace: pipe mutex locked"));

	npages = round_page(size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	object = vm_object_allocate(OBJT_DEFAULT, npages);
	buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, object, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.object = object;
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	amountpipekva += cpipe->pipe_buffer.size;
	return (0);
}

/*
 * initialize and allocate VM and memory for pipe
 */
static int
pipe_create(cpipep)
	struct pipe **cpipep;
{
	struct pipe *cpipe;
	int error;

	*cpipep = uma_zalloc(pipe_zone, M_WAITOK);
	if (*cpipep == NULL)
		return (ENOMEM);

	cpipe = *cpipep;

	/* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.object = NULL;
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.kva = 0;
#endif
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

#ifndef PIPE_NODIRECT
	/*
	 * pipe data structure initializations to support direct pipe I/O
	 */
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.kva = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
	/* cpipe->pipe_map.ms[] = invalid */
#endif

	cpipe->pipe_mtxp = NULL;	/* avoid pipespace assertion */
	error = pipespace(cpipe, PIPE_SIZE);
	if (error)
		return (error);

	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;

	return (0);
}


/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}
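
/*
 * The long-term I/O lock above serializes sleeping operations, while the
 * short-term pipe mutex protects the flag word itself.  A sketch of the
 * pattern used throughout this file:
 */
#if 0
static void
example_locked_region(struct pipe *cpipe)
{
	int error;

	PIPE_LOCK(cpipe);
	error = pipelock(cpipe, 1);	/* may sleep; catch == 1 allows signals */
	if (error == 0) {
		/* ... manipulate pipe_buffer, possibly sleeping ... */
		pipeunlock(cpipe);
	}
	PIPE_UNLOCK(cpipe);
}
#endif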

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	error = mac_check_pipe_read(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			PIPE_UNLOCK(rpipe);
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.  We
			 * will either break out with an error or we will sleep and
			 * relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
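
/*
 * Userland consequences of the logic above (a sketch assuming standard
 * POSIX semantics): a non-blocking read of an empty pipe fails with
 * EAGAIN, while EOF (writer closed) yields a 0-byte read.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_nonblock_read(int rfd)
{
	char buf[64];
	ssize_t n;

	(void)fcntl(rfd, F_SETFL, fcntl(rfd, F_GETFL) | O_NONBLOCK);
	n = read(rfd, buf, sizeof(buf));
	if (n == -1 && errno == EAGAIN)
		return (0);		/* pipe empty, writer still open */
	return (n == 0 ? 1 : 2);	/* 1: EOF; 2: got data */
}
#endif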

#ifndef PIPE_NODIRECT
/*
 * Map the sending process's buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		vm_page_t m;

		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queues() and vm_page_unlock_queues()
		 * should not be performed outside of this loop.
		 */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
		     addr)) == 0) {
			int j;

			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			vm_page_unlock_queues();
			return (EFAULT);
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	GIANT_REQUIRED;
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_GET_GIANT(wpipe);
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_DROP_GIANT(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	pipelock(wpipe, 0);
	PIPE_GET_GIANT(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_DROP_GIANT(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_GET_GIANT(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe, 0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		PIPE_GET_GIANT(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_DROP_GIANT(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
#endif

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_check_pipe_write(active_cred, wpipe);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			PIPE_GET_GIANT(wpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			PIPE_DROP_GIANT(wpipe);
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred, unbusy and return, waking up any
	 * pending readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
			if ((error = pipelock(wpipe, 1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				PIPE_UNLOCK(rpipe);
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);
				PIPE_LOCK(rpipe);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");

					PIPE_UNLOCK(rpipe);
					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
					PIPE_LOCK(rpipe);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
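
/*
 * The PIPE_BUF rule enforced above, seen from the writer's side (a
 * sketch assuming only standard POSIX semantics): writes of at most
 * PIPE_BUF bytes are never interleaved with other writers' data.
 */
#if 0
#include <limits.h>
#include <unistd.h>

static void
example_atomic_write(int wfd)
{
	char record[PIPE_BUF];

	/*
	 * Even with several processes writing to wfd concurrently, each
	 * record of PIPE_BUF bytes or less arrives contiguously; larger
	 * writes may be split and interleaved.
	 */
	(void)write(wfd, record, sizeof(record));
}
#endif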

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;
#ifdef MAC
	int error;
#endif

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	switch (cmd) {

	case FIONBIO:
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		PIPE_UNLOCK(mpipe);
		return (0);

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		PIPE_UNLOCK(mpipe);
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		PIPE_UNLOCK(mpipe);
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		return (0);

	}
	PIPE_UNLOCK(mpipe);
	return (ENOTTY);
}
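
/*
 * Userland use of the FIONREAD ioctl handled above (a minimal sketch):
 * it reports how many bytes a read would currently return.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/filio.h>

static int
example_fionread(int rfd)
{
	int nbytes;

	if (ioctl(rfd, FIONREAD, &nbytes) == -1)
		return (-1);
	return (nbytes);	/* bytes immediately available in the pipe */
}
#endif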

static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	error = mac_check_pipe_poll(active_cred, rpipe);
	if (error)
		goto locked_error;
#endif
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = (struct pipe *)fp->f_data;
#ifdef MAC
	int error;

	PIPE_LOCK(pipe);
	error = mac_check_pipe_stat(active_cred, pipe);
	PIPE_UNLOCK(pipe);
	if (error)
		return (error);
#endif
	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	GIANT_REQUIRED;
	KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
	       ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		kmem_free(kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	if (cpipe->pipe_map.kva != 0) {
		amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}

/*
 * shutdown the pipe
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;
	int hadpeer;

	if (cpipe == NULL)
		return;

	hadpeer = 0;

	/* partially created pipes won't have a valid mutex. */
	if (PIPE_MTX(cpipe) != NULL)
		PIPE_LOCK(cpipe);

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
	}

#ifdef MAC
	if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
		mac_destroy_pipe(cpipe);
#endif

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		hadpeer++;
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE(&ppipe->pipe_sel.si_note, 0);
		ppipe->pipe_peer = NULL;
	}
	/*
	 * free resources
	 */
	if (PIPE_MTX(cpipe) != NULL) {
		PIPE_UNLOCK(cpipe);
		if (!hadpeer) {
			mtx_destroy(PIPE_MTX(cpipe));
			free(PIPE_MTX(cpipe), M_TEMP);
		}
	}
	mtx_lock(&Giant);
	pipe_free_kmem(cpipe);
	uma_zfree(pipe_zone, cpipe);
	mtx_unlock(&Giant);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL)
			/* other end of pipe has been closed */
			return (EBADF);
		break;
	default:
		return (1);
	}
	kn->kn_hook = cpipe;

	PIPE_LOCK(cpipe);
	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	PIPE_LOCK(cpipe);
	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
	PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	PIPE_UNLOCK(rpipe);
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
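
/*
 * Userland registration against the read filter above (a minimal sketch
 * assuming standard kqueue(2)/kevent(2) usage):
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
example_kevent_pipe(int rfd)
{
	int kq;
	struct kevent kev;

	kq = kqueue();
	if (kq == -1)
		return (-1);
	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (-1);
	/*
	 * kevent(kq, NULL, 0, &kev, 1, NULL) now reports readability;
	 * kev.data carries the byte count set in filt_piperead().
	 */
	return (kq);
}
#endif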