xref: /freebsd/sys/kern/kern_descrip.c (revision 10f0bcab61ef441cb5af32fb706688d8cbd55dc0)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_compat.h"
41 #include "opt_ddb.h"
42 #include "opt_ktrace.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 
47 #include <sys/conf.h>
48 #include <sys/domain.h>
49 #include <sys/fcntl.h>
50 #include <sys/file.h>
51 #include <sys/filedesc.h>
52 #include <sys/filio.h>
53 #include <sys/jail.h>
54 #include <sys/kernel.h>
55 #include <sys/limits.h>
56 #include <sys/lock.h>
57 #include <sys/malloc.h>
58 #include <sys/mount.h>
59 #include <sys/mqueue.h>
60 #include <sys/mutex.h>
61 #include <sys/namei.h>
62 #include <sys/priv.h>
63 #include <sys/proc.h>
64 #include <sys/protosw.h>
65 #include <sys/resourcevar.h>
66 #include <sys/signalvar.h>
67 #include <sys/socketvar.h>
68 #include <sys/stat.h>
69 #include <sys/sx.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/sysctl.h>
72 #include <sys/sysproto.h>
73 #include <sys/unistd.h>
74 #include <sys/user.h>
75 #include <sys/vnode.h>
76 #ifdef KTRACE
77 #include <sys/ktrace.h>
78 #endif
79 
80 #include <security/audit/audit.h>
81 
82 #include <vm/uma.h>
83 
84 #include <ddb/ddb.h>
85 
/* malloc(9) types used by the file descriptor code. */
static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
		     "file desc to leader structures");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

/* UMA zone used for struct file allocations. */
static uma_zone_t file_zone;


/* How to treat 'new' parameter when allocating a fd for do_dup(). */
enum dup_type { DUP_VARIABLE, DUP_FIXED };

/* Forward declarations for the static helpers defined below. */
static int do_dup(struct thread *td, enum dup_type type, int old, int new,
    register_t *retval);
static int	fd_first_free(struct filedesc *, int, int);
static int	fd_last_used(struct filedesc *, int, int);
static void	fdgrowtable(struct filedesc *, int);
static void	fdunused(struct filedesc *fdp, int fd);
static void	fdused(struct filedesc *fdp, int fd);
104 
/*
 * A process is initially started out with NDFILE descriptors stored within
 * this structure, selected to be enough for typical applications based on
 * the historical limit of 20 open files (and the usage of descriptors by
 * shells).  If these descriptors are exhausted, a larger descriptor table
 * may be allocated, up to a process' resource limit; the internal arrays
 * are then unused.
 */
#define NDFILE		20
/* Size in bytes of one word of the descriptor in-use bitmap. */
#define NDSLOTSIZE	sizeof(NDSLOTTYPE)
/* Number of descriptor bits held in one bitmap word. */
#define	NDENTRIES	(NDSLOTSIZE * __CHAR_BIT)
/* Index of the bitmap word that holds the bit for descriptor x. */
#define NDSLOT(x)	((x) / NDENTRIES)
/* Mask selecting descriptor x's bit within its bitmap word. */
#define NDBIT(x)	((NDSLOTTYPE)1 << ((x) % NDENTRIES))
/* Number of bitmap words needed to track x descriptors (rounded up). */
#define	NDSLOTS(x)	(((x) + NDENTRIES - 1) / NDENTRIES)

/*
 * Storage required per open file descriptor: one file pointer plus one
 * byte of per-descriptor flags (see fd_ofiles/fd_ofileflags).
 */
#define OFILESIZE (sizeof(struct file *) + sizeof(char))
124 
125 /*
126  * Basic allocation of descriptors:
127  * one of the above, plus arrays for NDFILE descriptors.
128  */
struct filedesc0 {
	struct	filedesc fd_fd;		/* the filedesc proper, first so a
					 * filedesc0 can be used as a filedesc */
	/*
	 * These arrays are used when the number of open files is
	 * <= NDFILE, and are then pointed to by the pointers above.
	 */
	struct	file *fd_dfiles[NDFILE];	/* built-in file table */
	char	fd_dfileflags[NDFILE];		/* built-in per-fd flags */
	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];	/* built-in in-use bitmap */
};
139 
/*
 * Descriptor management.
 */
volatile int openfiles;			/* actual number of open files */
struct mtx sigio_lock;		/* mtx to protect pointers to sigio */
/*
 * Hook invoked when a DTYPE_MQUEUE descriptor is closed; presumably
 * installed by the POSIX mqueue code — confirm at its definition site.
 */
void	(*mq_fdclose)(struct thread *td, int fd, struct file *fp);

/* A mutex to protect the association between a proc and filedesc. */
static struct mtx	fdesc_mtx;
149 
150 /*
151  * Find the first zero bit in the given bitmap, starting at low and not
152  * exceeding size - 1.
153  */
static int
fd_first_free(struct filedesc *fdp, int low, int size)
{
	NDSLOTTYPE *map = fdp->fd_map;
	NDSLOTTYPE mask;
	int off, maxoff;

	/* Empty search range: report 'low' itself. */
	if (low >= size)
		return (low);

	off = NDSLOT(low);
	/*
	 * 'low' may fall in the middle of a bitmap word; mask off the
	 * bits below it and test the remainder of that first word.
	 */
	if (low % NDENTRIES) {
		mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
		if ((mask &= ~map[off]) != 0UL)
			return (off * NDENTRIES + ffsl(mask) - 1);
		++off;
	}
	/* Scan whole words for one that is not completely full. */
	for (maxoff = NDSLOTS(size); off < maxoff; ++off)
		if (map[off] != ~0UL)
			return (off * NDENTRIES + ffsl(~map[off]) - 1);
	/* All bits in [low, size) are set. */
	return (size);
}
176 
177 /*
178  * Find the highest non-zero bit in the given bitmap, starting at low and
179  * not exceeding size - 1.
180  */
static int
fd_last_used(struct filedesc *fdp, int low, int size)
{
	NDSLOTTYPE *map = fdp->fd_map;
	NDSLOTTYPE mask;
	int off, minoff;

	/* Empty search range. */
	if (low >= size)
		return (-1);

	off = NDSLOT(size);
	/*
	 * 'size' may end in the middle of a bitmap word; mask off the
	 * bits at or above it and test that partial word first.
	 *
	 * NOTE(review): when size is an exact multiple of NDENTRIES the
	 * loop below starts at word NDSLOT(size), whose bits are all
	 * >= size.  This is only correct while callers (fdunused) pass
	 * size == fd_lastfile so all higher bits are clear — confirm
	 * that invariant for any new caller.
	 */
	if (size % NDENTRIES) {
		mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
		if ((mask &= map[off]) != 0)
			return (off * NDENTRIES + flsl(mask) - 1);
		--off;
	}
	/* Scan downward for the highest word with any bit set. */
	for (minoff = NDSLOT(low); off >= minoff; --off)
		if (map[off] != 0)
			return (off * NDENTRIES + flsl(map[off]) - 1);
	/* No bit set in [low, size). */
	return (low - 1);
}
203 
204 static int
205 fdisused(struct filedesc *fdp, int fd)
206 {
207         KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
208             ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
209 	return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
210 }
211 
212 /*
213  * Mark a file descriptor as used.
214  */
215 static void
216 fdused(struct filedesc *fdp, int fd)
217 {
218 
219 	FILEDESC_XLOCK_ASSERT(fdp);
220 	KASSERT(!fdisused(fdp, fd),
221 	    ("fd already used"));
222 
223 	fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
224 	if (fd > fdp->fd_lastfile)
225 		fdp->fd_lastfile = fd;
226 	if (fd == fdp->fd_freefile)
227 		fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
228 }
229 
230 /*
231  * Mark a file descriptor as unused.
232  */
233 static void
234 fdunused(struct filedesc *fdp, int fd)
235 {
236 
237 	FILEDESC_XLOCK_ASSERT(fdp);
238 	KASSERT(fdisused(fdp, fd),
239 	    ("fd is already unused"));
240 	KASSERT(fdp->fd_ofiles[fd] == NULL,
241 	    ("fd is still in use"));
242 
243 	fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
244 	if (fd < fdp->fd_freefile)
245 		fdp->fd_freefile = fd;
246 	if (fd == fdp->fd_lastfile)
247 		fdp->fd_lastfile = fd_last_used(fdp, 0, fd);
248 }
249 
250 /*
251  * System calls on descriptors.
252  */
253 #ifndef _SYS_SYSPROTO_H_
254 struct getdtablesize_args {
255 	int	dummy;
256 };
257 #endif
258 /* ARGSUSED */
259 int
260 getdtablesize(struct thread *td, struct getdtablesize_args *uap)
261 {
262 	struct proc *p = td->td_proc;
263 
264 	PROC_LOCK(p);
265 	td->td_retval[0] =
266 	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
267 	PROC_UNLOCK(p);
268 	return (0);
269 }
270 
271 /*
272  * Duplicate a file descriptor to a particular value.
273  *
274  * Note: keep in mind that a potential race condition exists when closing
275  * descriptors from a shared descriptor table (via rfork).
276  */
277 #ifndef _SYS_SYSPROTO_H_
278 struct dup2_args {
279 	u_int	from;
280 	u_int	to;
281 };
282 #endif
283 /* ARGSUSED */
284 int
285 dup2(struct thread *td, struct dup2_args *uap)
286 {
287 
288 	return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
289 		    td->td_retval));
290 }
291 
292 /*
293  * Duplicate a file descriptor.
294  */
295 #ifndef _SYS_SYSPROTO_H_
296 struct dup_args {
297 	u_int	fd;
298 };
299 #endif
300 /* ARGSUSED */
301 int
302 dup(struct thread *td, struct dup_args *uap)
303 {
304 
305 	return (do_dup(td, DUP_VARIABLE, (int)uap->fd, 0, td->td_retval));
306 }
307 
308 /*
309  * The file control system call.
310  */
311 #ifndef _SYS_SYSPROTO_H_
312 struct fcntl_args {
313 	int	fd;
314 	int	cmd;
315 	long	arg;
316 };
317 #endif
318 /* ARGSUSED */
319 int
320 fcntl(struct thread *td, struct fcntl_args *uap)
321 {
322 	struct flock fl;
323 	struct oflock ofl;
324 	intptr_t arg;
325 	int error;
326 	int cmd;
327 
328 	error = 0;
329 	cmd = uap->cmd;
330 	switch (uap->cmd) {
331 	case F_OGETLK:
332 	case F_OSETLK:
333 	case F_OSETLKW:
334 		/*
335 		 * Convert old flock structure to new.
336 		 */
337 		error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
338 		fl.l_start = ofl.l_start;
339 		fl.l_len = ofl.l_len;
340 		fl.l_pid = ofl.l_pid;
341 		fl.l_type = ofl.l_type;
342 		fl.l_whence = ofl.l_whence;
343 		fl.l_sysid = 0;
344 
345 		switch (uap->cmd) {
346 		case F_OGETLK:
347 		    cmd = F_GETLK;
348 		    break;
349 		case F_OSETLK:
350 		    cmd = F_SETLK;
351 		    break;
352 		case F_OSETLKW:
353 		    cmd = F_SETLKW;
354 		    break;
355 		}
356 		arg = (intptr_t)&fl;
357 		break;
358         case F_GETLK:
359         case F_SETLK:
360         case F_SETLKW:
361 	case F_SETLK_REMOTE:
362                 error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
363                 arg = (intptr_t)&fl;
364                 break;
365 	default:
366 		arg = uap->arg;
367 		break;
368 	}
369 	if (error)
370 		return (error);
371 	error = kern_fcntl(td, uap->fd, cmd, arg);
372 	if (error)
373 		return (error);
374 	if (uap->cmd == F_OGETLK) {
375 		ofl.l_start = fl.l_start;
376 		ofl.l_len = fl.l_len;
377 		ofl.l_pid = fl.l_pid;
378 		ofl.l_type = fl.l_type;
379 		ofl.l_whence = fl.l_whence;
380 		error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
381 	} else if (uap->cmd == F_GETLK) {
382 		error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
383 	}
384 	return (error);
385 }
386 
387 static inline struct file *
388 fdtofp(int fd, struct filedesc *fdp)
389 {
390 	struct file *fp;
391 
392 	FILEDESC_LOCK_ASSERT(fdp);
393 	if ((unsigned)fd >= fdp->fd_nfiles ||
394 	    (fp = fdp->fd_ofiles[fd]) == NULL)
395 		return (NULL);
396 	return (fp);
397 }
398 
/*
 * kern_fcntl(): in-kernel implementation of fcntl(2).  'arg' is either
 * a plain integer or, for the locking commands, a pointer to a
 * kernel-resident struct flock prepared by the caller (see fcntl()
 * above).  Returns 0 or an errno; results go to td->td_retval or back
 * through the flock.
 */
int
kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
{
	struct filedesc *fdp;
	struct flock *flp;
	struct file *fp;
	struct proc *p;
	char *pop;
	struct vnode *vp;
	u_int newmin;
	int error, flg, tmp;
	int vfslocked;

	vfslocked = 0;
	error = 0;
	flg = F_POSIX;
	p = td->td_proc;
	fdp = p->p_fd;

	switch (cmd) {
	case F_DUPFD:
		/* Duplicate onto the lowest free descriptor >= arg. */
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		FILEDESC_SUNLOCK(fdp);
		newmin = arg;
		PROC_LOCK(p);
		if (newmin >= lim_cur(p, RLIMIT_NOFILE) ||
		    newmin >= maxfilesperproc) {
			PROC_UNLOCK(p);
			error = EINVAL;
			break;
		}
		PROC_UNLOCK(p);
		error = do_dup(td, DUP_VARIABLE, fd, newmin, td->td_retval);
		break;

	case F_DUP2FD:
		/* Duplicate onto exactly the descriptor given in arg. */
		tmp = arg;
		error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
		break;

	case F_GETFD:
		/* Report the close-on-exec flag for this descriptor. */
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		pop = &fdp->fd_ofileflags[fd];
		td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		FILEDESC_SUNLOCK(fdp);
		break;

	case F_SETFD:
		/* Set or clear the close-on-exec flag. */
		FILEDESC_XLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_XUNLOCK(fdp);
			error = EBADF;
			break;
		}
		pop = &fdp->fd_ofileflags[fd];
		*pop = (*pop &~ UF_EXCLOSE) |
		    (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
		FILEDESC_XUNLOCK(fdp);
		break;

	case F_GETFL:
		/* Report the file status flags in their open(2) form. */
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		td->td_retval[0] = OFLAGS(fp->f_flag);
		FILEDESC_SUNLOCK(fdp);
		break;

	case F_SETFL:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		/*
		 * Replace the fcntl-settable bits of f_flag atomically,
		 * retrying if another thread changed f_flag meanwhile.
		 */
		do {
			tmp = flg = fp->f_flag;
			tmp &= ~FCNTLFLAGS;
			tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
		} while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
		/* Push the new non-blocking mode down to the object. */
		tmp = fp->f_flag & FNONBLOCK;
		error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
		if (error) {
			fdrop(fp, td);
			break;
		}
		/* Push the new async mode down to the object. */
		tmp = fp->f_flag & FASYNC;
		error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
		if (error == 0) {
			fdrop(fp, td);
			break;
		}
		/* FIOASYNC failed: back out the FIONBIO change too. */
		atomic_clear_int(&fp->f_flag, FNONBLOCK);
		tmp = 0;
		(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
		fdrop(fp, td);
		break;

	case F_GETOWN:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		/* fo_ioctl() may block; hold a reference and unlock. */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
		if (error == 0)
			td->td_retval[0] = tmp;
		fdrop(fp, td);
		break;

	case F_SETOWN:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		/* fo_ioctl() may block; hold a reference and unlock. */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		tmp = arg;
		error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
		fdrop(fp, td);
		break;

	case F_SETLK_REMOTE:
		/* Remote (NFS lockd) locks require privilege. */
		error = priv_check(td, PRIV_NFS_LOCKD);
		if (error)
			return (error);
		flg = F_REMOTE;
		goto do_setlk;

	case F_SETLKW:
		flg |= F_WAIT;
		/* FALLTHROUGH F_SETLK */

	case F_SETLK:
	do_setlk:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		/* Advisory record locks only apply to vnodes. */
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		flp = (struct flock *)arg;
		/* Convert a SEEK_CUR range to absolute, guarding overflow. */
		if (flp->l_whence == SEEK_CUR) {
			if (fp->f_offset < 0 ||
			    (flp->l_start > 0 &&
			     fp->f_offset > OFF_MAX - flp->l_start)) {
				FILEDESC_SUNLOCK(fdp);
				error = EOVERFLOW;
				break;
			}
			flp->l_start += fp->f_offset;
		}

		/*
		 * VOP_ADVLOCK() may block.
		 */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		switch (flp->l_type) {
		case F_RDLCK:
			/* Read locks require read access on the fd. */
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p->p_leader);
			p->p_leader->p_flag |= P_ADVLOCK;
			PROC_UNLOCK(p->p_leader);
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    flp, flg);
			break;
		case F_WRLCK:
			/* Write locks require write access on the fd. */
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p->p_leader);
			p->p_leader->p_flag |= P_ADVLOCK;
			PROC_UNLOCK(p->p_leader);
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    flp, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
			    flp, flg);
			break;
		case F_UNLCKSYS:
			/*
			 * Temporary api for testing remote lock
			 * infrastructure.
			 */
			if (flg != F_REMOTE) {
				error = EINVAL;
				break;
			}
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
			    F_UNLCKSYS, flp, flg);
			break;
		default:
			error = EINVAL;
			break;
		}
		VFS_UNLOCK_GIANT(vfslocked);
		vfslocked = 0;
		/* Check for race with close */
		FILEDESC_SLOCK(fdp);
		if ((unsigned) fd >= fdp->fd_nfiles ||
		    fp != fdp->fd_ofiles[fd]) {
			/*
			 * The descriptor was closed or replaced while we
			 * slept in VOP_ADVLOCK(); undo the lock we set.
			 */
			FILEDESC_SUNLOCK(fdp);
			flp->l_whence = SEEK_SET;
			flp->l_start = 0;
			flp->l_len = 0;
			flp->l_type = F_UNLCK;
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					   F_UNLCK, flp, F_POSIX);
			VFS_UNLOCK_GIANT(vfslocked);
			vfslocked = 0;
		} else
			FILEDESC_SUNLOCK(fdp);
		fdrop(fp, td);
		break;

	case F_GETLK:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		flp = (struct flock *)arg;
		if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
		    flp->l_type != F_UNLCK) {
			FILEDESC_SUNLOCK(fdp);
			error = EINVAL;
			break;
		}
		/* Convert a SEEK_CUR range to absolute, guarding overflow. */
		if (flp->l_whence == SEEK_CUR) {
			if ((flp->l_start > 0 &&
			    fp->f_offset > OFF_MAX - flp->l_start) ||
			    (flp->l_start < 0 &&
			     fp->f_offset < OFF_MIN - flp->l_start)) {
				FILEDESC_SUNLOCK(fdp);
				error = EOVERFLOW;
				break;
			}
			flp->l_start += fp->f_offset;
		}
		/*
		 * VOP_ADVLOCK() may block.
		 */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
		    F_POSIX);
		VFS_UNLOCK_GIANT(vfslocked);
		vfslocked = 0;
		fdrop(fp, td);
		break;
	default:
		error = EINVAL;
		break;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
698 
699 /*
700  * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
701  */
static int
do_dup(struct thread *td, enum dup_type type, int old, int new,
    register_t *retval)
{
	struct filedesc *fdp;
	struct proc *p;
	struct file *fp;
	struct file *delfp;
	int error, holdleaders, maxfd;

	KASSERT((type == DUP_VARIABLE || type == DUP_FIXED),
	    ("invalid dup type %d", type));

	p = td->td_proc;
	fdp = p->p_fd;

	/*
	 * Verify we have a valid descriptor to dup from and possibly to
	 * dup to.
	 */
	if (old < 0 || new < 0)
		return (EBADF);
	PROC_LOCK(p);
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	if (new >= maxfd)
		return (EMFILE);

	FILEDESC_XLOCK(fdp);
	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	/* dup2(fd, fd) is a no-op: return the fd itself. */
	if (type == DUP_FIXED && old == new) {
		*retval = new;
		FILEDESC_XUNLOCK(fdp);
		return (0);
	}
	fp = fdp->fd_ofiles[old];
	/* Hold a reference so fp survives any lock drops below. */
	fhold(fp);

	/*
	 * If the caller specified a file descriptor, make sure the file
	 * table is large enough to hold it, and grab it.  Otherwise, just
	 * allocate a new descriptor the usual way.  Since the filedesc
	 * lock may be temporarily dropped in the process, we have to look
	 * out for a race.
	 */
	if (type == DUP_FIXED) {
		if (new >= fdp->fd_nfiles)
			fdgrowtable(fdp, new + 1);
		if (fdp->fd_ofiles[new] == NULL)
			fdused(fdp, new);
	} else {
		if ((error = fdalloc(td, new, &new)) != 0) {
			FILEDESC_XUNLOCK(fdp);
			fdrop(fp, td);
			return (error);
		}
	}

	/*
	 * If the old file changed out from under us then treat it as a
	 * bad file descriptor.  Userland should do its own locking to
	 * avoid this case.
	 */
	if (fdp->fd_ofiles[old] != fp) {
		/* we've allocated a descriptor which we won't use */
		if (fdp->fd_ofiles[new] == NULL)
			fdunused(fdp, new);
		FILEDESC_XUNLOCK(fdp);
		fdrop(fp, td);
		return (EBADF);
	}
	KASSERT(old != new,
	    ("new fd is same as old"));

	/*
	 * Save info on the descriptor being overwritten.  We cannot close
	 * it without introducing an ownership race for the slot, since we
	 * need to drop the filedesc lock to call closef().
	 *
	 * XXX this duplicates parts of close().
	 */
	delfp = fdp->fd_ofiles[new];
	holdleaders = 0;
	if (delfp != NULL) {
		if (td->td_proc->p_fdtol != NULL) {
			/*
			 * Ask fdfree() to sleep to ensure that all relevant
			 * process leaders can be traversed in closef().
			 */
			fdp->fd_holdleaderscount++;
			holdleaders = 1;
		}
	}

	/*
	 * Duplicate the source descriptor.  The new descriptor never
	 * inherits the close-on-exec flag.
	 */
	fdp->fd_ofiles[new] = fp;
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
	*retval = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 *
	 * XXX this duplicates parts of close().
	 */
	if (delfp != NULL) {
		/* Must run under the filedesc lock; see kern_close(). */
		knote_fdclose(td, new);
		if (delfp->f_type == DTYPE_MQUEUE)
			mq_fdclose(td, new, delfp);
		FILEDESC_XUNLOCK(fdp);
		(void) closef(delfp, td);
		if (holdleaders) {
			FILEDESC_XLOCK(fdp);
			fdp->fd_holdleaderscount--;
			/* Wake up fdfree() if it was waiting on us. */
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				wakeup(&fdp->fd_holdleaderscount);
			}
			FILEDESC_XUNLOCK(fdp);
		}
	} else {
		FILEDESC_XUNLOCK(fdp);
	}
	return (0);
}
836 
837 /*
838  * If sigio is on the list associated with a process or process group,
839  * disable signalling from the device, remove sigio from the list and
840  * free sigio.
841  */
void
funsetown(struct sigio **sigiop)
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	/* Clear the owner's back-pointer so nobody else can reach us. */
	*(sigio->sio_myref) = NULL;
	/* A negative pgid means the target is a process group. */
	if ((sigio)->sio_pgid < 0) {
		struct pgrp *pg = (sigio)->sio_pgrp;
		PGRP_LOCK(pg);
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PGRP_UNLOCK(pg);
	} else {
		struct proc *p = (sigio)->sio_proc;
		PROC_LOCK(p);
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PROC_UNLOCK(p);
	}
	SIGIO_UNLOCK();
	/* Release the credential reference and the structure itself. */
	crfree(sigio->sio_ucred);
	FREE(sigio, M_SIGIO);
}
871 
872 /*
873  * Free a list of sigio structures.
874  * We only need to lock the SIGIO_LOCK because we have made ourselves
875  * inaccessible to callers of fsetown and therefore do not need to lock
876  * the proc or pgrp struct for the list manipulation.
877  */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct proc *p;
	struct pgrp *pg;
	struct sigio *sigio;

	sigio = SLIST_FIRST(sigiolst);
	if (sigio == NULL)
		return;
	p = NULL;
	pg = NULL;

	/*
	 * Every entry of the list should belong
	 * to a single proc or pgrp.
	 */
	if (sigio->sio_pgid < 0) {
		pg = sigio->sio_pgrp;
		PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
	} else /* if (sigio->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	}

	SIGIO_LOCK();
	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
		/* Clear the owner's back-pointer before unlinking. */
		*(sigio->sio_myref) = NULL;
		if (pg != NULL) {
			KASSERT(sigio->sio_pgid < 0,
			    ("Proc sigio in pgrp sigio list"));
			KASSERT(sigio->sio_pgrp == pg,
			    ("Bogus pgrp in sigio list"));
			PGRP_LOCK(pg);
			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PGRP_UNLOCK(pg);
		} else /* if (p != NULL) */ {
			KASSERT(sigio->sio_pgid > 0,
			    ("Pgrp sigio in proc sigio list"));
			KASSERT(sigio->sio_proc == p,
			    ("Bogus proc in sigio list"));
			PROC_LOCK(p);
			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PROC_UNLOCK(p);
		}
		/*
		 * NOTE(review): the SIGIO lock is dropped around
		 * crfree()/FREE() — presumably they may not be called
		 * while holding it; confirm against SIGIO_LOCK()'s
		 * definition.
		 */
		SIGIO_UNLOCK();
		crfree(sigio->sio_ucred);
		FREE(sigio, M_SIGIO);
		SIGIO_LOCK();
	}
	SIGIO_UNLOCK();
}
932 
933 /*
934  * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
935  *
936  * After permission checking, add a sigio structure to the sigio list for
937  * the process or process group.
938  */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;
	int ret;

	/* pgid 0 means "clear the current owner". */
	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	ret = 0;

	/* Allocate and fill in the new sigio out of locks. */
	MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	sigio->sio_myref = sigiop;

	sx_slock(&proctree_lock);
	if (pgid > 0) {
		/* Positive pgid: target is a single process. */
		proc = pfind(pgid);
		if (proc == NULL) {
			ret = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 *
		 * NOTE(review): p_session is read after PROC_UNLOCK();
		 * presumably the shared proctree_lock keeps it stable
		 * here — confirm.
		 */
		PROC_UNLOCK(proc);
		if (proc->p_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		/* Negative pgid: target is the process group -pgid. */
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			ret = ESRCH;
			goto fail;
		}
		PGRP_UNLOCK(pgrp);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		proc = NULL;
	}
	/* Drop any previously registered owner before installing ours. */
	funsetown(sigiop);
	if (pgid > 0) {
		PROC_LOCK(proc);
		/*
		 * Since funsetownlst() is called without the proctree
		 * locked, we need to check for P_WEXIT.
		 * XXX: is ESRCH correct?
		 */
		if ((proc->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(proc);
			ret = ESRCH;
			goto fail;
		}
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		PROC_UNLOCK(proc);
	} else {
		PGRP_LOCK(pgrp);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		PGRP_UNLOCK(pgrp);
	}
	sx_sunlock(&proctree_lock);
	/* Publish the new sigio under the SIGIO lock. */
	SIGIO_LOCK();
	*sigiop = sigio;
	SIGIO_UNLOCK();
	return (0);

fail:
	/* Undo the allocation done above. */
	sx_sunlock(&proctree_lock);
	crfree(sigio->sio_ucred);
	FREE(sigio, M_SIGIO);
	return (ret);
}
1040 
1041 /*
1042  * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1043  */
1044 pid_t
1045 fgetown(sigiop)
1046 	struct sigio **sigiop;
1047 {
1048 	pid_t pgid;
1049 
1050 	SIGIO_LOCK();
1051 	pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1052 	SIGIO_UNLOCK();
1053 	return (pgid);
1054 }
1055 
1056 /*
1057  * Close a file descriptor.
1058  */
1059 #ifndef _SYS_SYSPROTO_H_
1060 struct close_args {
1061 	int     fd;
1062 };
1063 #endif
1064 /* ARGSUSED */
1065 int
1066 close(td, uap)
1067 	struct thread *td;
1068 	struct close_args *uap;
1069 {
1070 
1071 	return (kern_close(td, uap->fd));
1072 }
1073 
1074 int
1075 kern_close(td, fd)
1076 	struct thread *td;
1077 	int fd;
1078 {
1079 	struct filedesc *fdp;
1080 	struct file *fp;
1081 	int error;
1082 	int holdleaders;
1083 
1084 	error = 0;
1085 	holdleaders = 0;
1086 	fdp = td->td_proc->p_fd;
1087 
1088 	AUDIT_SYSCLOSE(td, fd);
1089 
1090 	FILEDESC_XLOCK(fdp);
1091 	if ((unsigned)fd >= fdp->fd_nfiles ||
1092 	    (fp = fdp->fd_ofiles[fd]) == NULL) {
1093 		FILEDESC_XUNLOCK(fdp);
1094 		return (EBADF);
1095 	}
1096 	fdp->fd_ofiles[fd] = NULL;
1097 	fdp->fd_ofileflags[fd] = 0;
1098 	fdunused(fdp, fd);
1099 	if (td->td_proc->p_fdtol != NULL) {
1100 		/*
1101 		 * Ask fdfree() to sleep to ensure that all relevant
1102 		 * process leaders can be traversed in closef().
1103 		 */
1104 		fdp->fd_holdleaderscount++;
1105 		holdleaders = 1;
1106 	}
1107 
1108 	/*
1109 	 * We now hold the fp reference that used to be owned by the
1110 	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
1111 	 * knote_fdclose to prevent a race of the fd getting opened, a knote
1112 	 * added, and deleteing a knote for the new fd.
1113 	 */
1114 	knote_fdclose(td, fd);
1115 	if (fp->f_type == DTYPE_MQUEUE)
1116 		mq_fdclose(td, fd, fp);
1117 	FILEDESC_XUNLOCK(fdp);
1118 
1119 	error = closef(fp, td);
1120 	if (holdleaders) {
1121 		FILEDESC_XLOCK(fdp);
1122 		fdp->fd_holdleaderscount--;
1123 		if (fdp->fd_holdleaderscount == 0 &&
1124 		    fdp->fd_holdleaderswakeup != 0) {
1125 			fdp->fd_holdleaderswakeup = 0;
1126 			wakeup(&fdp->fd_holdleaderscount);
1127 		}
1128 		FILEDESC_XUNLOCK(fdp);
1129 	}
1130 	return (error);
1131 }
1132 
1133 #if defined(COMPAT_43)
1134 /*
1135  * Return status information about a file descriptor.
1136  */
1137 #ifndef _SYS_SYSPROTO_H_
1138 struct ofstat_args {
1139 	int	fd;
1140 	struct	ostat *sb;
1141 };
1142 #endif
1143 /* ARGSUSED */
1144 int
1145 ofstat(struct thread *td, struct ofstat_args *uap)
1146 {
1147 	struct ostat oub;
1148 	struct stat ub;
1149 	int error;
1150 
1151 	error = kern_fstat(td, uap->fd, &ub);
1152 	if (error == 0) {
1153 		cvtstat(&ub, &oub);
1154 		error = copyout(&oub, uap->sb, sizeof(oub));
1155 	}
1156 	return (error);
1157 }
1158 #endif /* COMPAT_43 */
1159 
1160 /*
1161  * Return status information about a file descriptor.
1162  */
1163 #ifndef _SYS_SYSPROTO_H_
1164 struct fstat_args {
1165 	int	fd;
1166 	struct	stat *sb;
1167 };
1168 #endif
1169 /* ARGSUSED */
1170 int
1171 fstat(struct thread *td, struct fstat_args *uap)
1172 {
1173 	struct stat ub;
1174 	int error;
1175 
1176 	error = kern_fstat(td, uap->fd, &ub);
1177 	if (error == 0)
1178 		error = copyout(&ub, uap->sb, sizeof(ub));
1179 	return (error);
1180 }
1181 
int
kern_fstat(struct thread *td, int fd, struct stat *sbp)
{
	struct file *fp;
	int error;

	AUDIT_ARG(fd, fd);

	/* Take a reference on the file so it cannot be closed under us. */
	if ((error = fget(td, fd, &fp)) != 0)
		return (error);

	AUDIT_ARG(file, td->td_proc, fp);

	/* Dispatch to the per-type stat method (vnode, socket, pipe, ...). */
	error = fo_stat(fp, sbp, td->td_ucred, td);
	fdrop(fp, td);
#ifdef KTRACE
	/* Record the returned structure for ktrace(1) consumers. */
	if (error == 0 && KTRPOINT(td, KTR_STRUCT))
		ktrstat(sbp);
#endif
	return (error);
}
1203 
1204 /*
1205  * Return status information about a file descriptor.
1206  */
1207 #ifndef _SYS_SYSPROTO_H_
1208 struct nfstat_args {
1209 	int	fd;
1210 	struct	nstat *sb;
1211 };
1212 #endif
1213 /* ARGSUSED */
1214 int
1215 nfstat(struct thread *td, struct nfstat_args *uap)
1216 {
1217 	struct nstat nub;
1218 	struct stat ub;
1219 	int error;
1220 
1221 	error = kern_fstat(td, uap->fd, &ub);
1222 	if (error == 0) {
1223 		cvtnstat(&ub, &nub);
1224 		error = copyout(&nub, uap->sb, sizeof(nub));
1225 	}
1226 	return (error);
1227 }
1228 
1229 /*
1230  * Return pathconf information about a file descriptor.
1231  */
1232 #ifndef _SYS_SYSPROTO_H_
1233 struct fpathconf_args {
1234 	int	fd;
1235 	int	name;
1236 };
1237 #endif
1238 /* ARGSUSED */
1239 int
1240 fpathconf(struct thread *td, struct fpathconf_args *uap)
1241 {
1242 	struct file *fp;
1243 	struct vnode *vp;
1244 	int error;
1245 
1246 	if ((error = fget(td, uap->fd, &fp)) != 0)
1247 		return (error);
1248 
1249 	/* If asynchronous I/O is available, it works for all descriptors. */
1250 	if (uap->name == _PC_ASYNC_IO) {
1251 		td->td_retval[0] = async_io_version;
1252 		goto out;
1253 	}
1254 	vp = fp->f_vnode;
1255 	if (vp != NULL) {
1256 		int vfslocked;
1257 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1258 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1259 		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
1260 		VOP_UNLOCK(vp, 0);
1261 		VFS_UNLOCK_GIANT(vfslocked);
1262 	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1263 		if (uap->name != _PC_PIPE_BUF) {
1264 			error = EINVAL;
1265 		} else {
1266 			td->td_retval[0] = PIPE_BUF;
1267 		error = 0;
1268 		}
1269 	} else {
1270 		error = EOPNOTSUPP;
1271 	}
1272 out:
1273 	fdrop(fp, td);
1274 	return (error);
1275 }
1276 
1277 /*
 * Grow the file table to accommodate (at least) nfd descriptors.  This may
1279  * block and drop the filedesc lock, but it will reacquire it before
1280  * returning.
1281  */
static void
fdgrowtable(struct filedesc *fdp, int nfd)
{
	struct file **ntable;
	char *nfileflags;
	int nnfiles, onfiles;
	NDSLOTTYPE *nmap;

	FILEDESC_XLOCK_ASSERT(fdp);

	KASSERT(fdp->fd_nfiles > 0,
	    ("zero-length file table"));

	/* compute the size of the new table */
	onfiles = fdp->fd_nfiles;
	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
	if (nnfiles <= onfiles)
		/* the table is already large enough */
		return;

	/* allocate a new table and (if required) new bitmaps */
	FILEDESC_XUNLOCK(fdp);
	MALLOC(ntable, struct file **, nnfiles * OFILESIZE,
	    M_FILEDESC, M_ZERO | M_WAITOK);
	/* The flag array lives in the same allocation, after the pointers. */
	nfileflags = (char *)&ntable[nnfiles];
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
		MALLOC(nmap, NDSLOTTYPE *, NDSLOTS(nnfiles) * NDSLOTSIZE,
		    M_FILEDESC, M_ZERO | M_WAITOK);
	else
		nmap = NULL;
	FILEDESC_XLOCK(fdp);

	/*
	 * We now have new tables ready to go.  Since we dropped the
	 * filedesc lock to call malloc(), watch out for a race.
	 */
	onfiles = fdp->fd_nfiles;
	if (onfiles >= nnfiles) {
		/* we lost the race, but that's OK */
		free(ntable, M_FILEDESC);
		if (nmap != NULL)
			free(nmap, M_FILEDESC);
		return;
	}
	bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
	bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
	/* Only free the old table if it was heap-allocated, not embedded. */
	if (onfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	fdp->fd_ofiles = ntable;
	fdp->fd_ofileflags = nfileflags;
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
		bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
		if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
			free(fdp->fd_map, M_FILEDESC);
		fdp->fd_map = nmap;
	}
	fdp->fd_nfiles = nnfiles;
}
1340 
1341 /*
1342  * Allocate a file descriptor for the process.
1343  */
int
fdalloc(struct thread *td, int minfd, int *result)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	int fd = -1, maxfd;

	FILEDESC_XLOCK_ASSERT(fdp);

	/* Never hand out a descriptor below the known-free watermark. */
	if (fdp->fd_freefile > minfd)
		minfd = fdp->fd_freefile;

	/* Per-process limit, capped by the system-wide ceiling. */
	PROC_LOCK(p);
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);

	/*
	 * Search the bitmap for a free descriptor.  If none is found, try
	 * to grow the file table.  Keep at it until we either get a file
	 * descriptor or run into process or system limits; fdgrowtable()
	 * may drop the filedesc lock, so we're in a race.
	 */
	for (;;) {
		fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
		if (fd >= maxfd)
			return (EMFILE);
		if (fd < fdp->fd_nfiles)
			break;
		/* Double the table (bounded by maxfd) and retry the scan. */
		fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
	}

	/*
	 * Perform some sanity checks, then mark the file descriptor as
	 * used and return it to the caller.
	 */
	KASSERT(!fdisused(fdp, fd),
	    ("fd_first_free() returned non-free descriptor"));
	KASSERT(fdp->fd_ofiles[fd] == NULL,
	    ("free descriptor isn't"));
	fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
	fdused(fdp, fd);
	*result = fd;
	return (0);
}
1388 
1389 /*
1390  * Check to see whether n user file descriptors are available to the process
1391  * p.
1392  */
int
fdavail(struct thread *td, int n)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file **fpp;
	int i, lim, last;

	FILEDESC_LOCK_ASSERT(fdp);

	PROC_LOCK(p);
	lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	/*
	 * Slots the table could still grow into count as available; if
	 * that headroom alone covers the request we are done.
	 */
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
		return (1);
	/* Otherwise scan the existing table for NULL slots above freefile. */
	last = min(fdp->fd_nfiles, lim);
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
		if (*fpp == NULL && --n <= 0)
			return (1);
	}
	return (0);
}
1416 
1417 /*
 * Create a new open file structure and allocate a file descriptor for the
1419  * process that refers to it.  We add one reference to the file for the
1420  * descriptor table and one reference for resultfp. This is to prevent us
1421  * being preempted and the entry in the descriptor table closed after we
1422  * release the FILEDESC lock.
1423  */
int
falloc(struct thread *td, struct file **resultfp, int *resultfd)
{
	struct proc *p = td->td_proc;
	struct file *fp;
	int error, i;
	int maxuserfiles = maxfiles - (maxfiles / 20);
	static struct timeval lastfail;
	static int curfail;

	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
	/* The last 5% of file slots are reserved for privileged users. */
	if ((openfiles >= maxuserfiles &&
	    priv_check(td, PRIV_MAXFILES) != 0) ||
	    openfiles >= maxfiles) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n",
				td->td_ucred->cr_ruid);
		}
		uma_zfree(file_zone, fp);
		return (ENFILE);
	}
	atomic_add_int(&openfiles, 1);

	/*
	 * The file starts with one reference for the descriptor table
	 * entry and, if the caller wants the pointer back via resultfp,
	 * a second one so the file cannot be destroyed by a concurrent
	 * close() once the FILEDESC lock is released.
	 */
	fp->f_count = 1;
	if (resultfp)
		fp->f_count++;
	fp->f_cred = crhold(td->td_ucred);
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fp->f_vnode = NULL;
	FILEDESC_XLOCK(p->p_fd);
	if ((error = fdalloc(td, 0, &i))) {
		FILEDESC_XUNLOCK(p->p_fd);
		/* Drop both references (table's and, if taken, caller's). */
		fdrop(fp, td);
		if (resultfp)
			fdrop(fp, td);
		return (error);
	}
	p->p_fd->fd_ofiles[i] = fp;
	FILEDESC_XUNLOCK(p->p_fd);
	if (resultfp)
		*resultfp = fp;
	if (resultfd)
		*resultfd = i;
	return (0);
}
1475 
1476 /*
1477  * Build a new filedesc structure from another.
1478  * Copy the current, root, and jail root vnode references.
1479  */
struct filedesc *
fdinit(struct filedesc *fdp)
{
	struct filedesc0 *newfdp;

	newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
	FILEDESC_LOCK_INIT(&newfdp->fd_fd);
	if (fdp != NULL) {
		/* Inherit cwd, root and jail root, taking a vnode ref each. */
		FILEDESC_XLOCK(fdp);
		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
		if (newfdp->fd_fd.fd_cdir)
			VREF(newfdp->fd_fd.fd_cdir);
		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
		if (newfdp->fd_fd.fd_rdir)
			VREF(newfdp->fd_fd.fd_rdir);
		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
		if (newfdp->fd_fd.fd_jdir)
			VREF(newfdp->fd_fd.fd_jdir);
		FILEDESC_XUNLOCK(fdp);
	}

	/* Create the file descriptor table. */
	newfdp->fd_fd.fd_refcnt = 1;
	newfdp->fd_fd.fd_holdcnt = 1;
	newfdp->fd_fd.fd_cmask = CMASK;
	/* Start out with the small tables embedded in struct filedesc0. */
	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
	newfdp->fd_fd.fd_nfiles = NDFILE;
	newfdp->fd_fd.fd_map = newfdp->fd_dmap;
	newfdp->fd_fd.fd_lastfile = -1;
	return (&newfdp->fd_fd);
}
1512 
1513 static struct filedesc *
1514 fdhold(struct proc *p)
1515 {
1516 	struct filedesc *fdp;
1517 
1518 	mtx_lock(&fdesc_mtx);
1519 	fdp = p->p_fd;
1520 	if (fdp != NULL)
1521 		fdp->fd_holdcnt++;
1522 	mtx_unlock(&fdesc_mtx);
1523 	return (fdp);
1524 }
1525 
1526 static void
1527 fddrop(struct filedesc *fdp)
1528 {
1529 	int i;
1530 
1531 	mtx_lock(&fdesc_mtx);
1532 	i = --fdp->fd_holdcnt;
1533 	mtx_unlock(&fdesc_mtx);
1534 	if (i > 0)
1535 		return;
1536 
1537 	FILEDESC_LOCK_DESTROY(fdp);
1538 	FREE(fdp, M_FILEDESC);
1539 }
1540 
1541 /*
1542  * Share a filedesc structure.
1543  */
1544 struct filedesc *
1545 fdshare(struct filedesc *fdp)
1546 {
1547 
1548 	FILEDESC_XLOCK(fdp);
1549 	fdp->fd_refcnt++;
1550 	FILEDESC_XUNLOCK(fdp);
1551 	return (fdp);
1552 }
1553 
1554 /*
1555  * Unshare a filedesc structure, if necessary by making a copy
1556  */
void
fdunshare(struct proc *p, struct thread *td)
{

	FILEDESC_XLOCK(p->p_fd);
	if (p->p_fd->fd_refcnt > 1) {
		struct filedesc *tmp;

		/*
		 * Shared with another process: build a private copy, then
		 * release our reference on the shared table and install
		 * the copy.  The lock must be dropped first since fdcopy()
		 * and fdfree() take it themselves.
		 */
		FILEDESC_XUNLOCK(p->p_fd);
		tmp = fdcopy(p->p_fd);
		fdfree(td);
		p->p_fd = tmp;
	} else
		FILEDESC_XUNLOCK(p->p_fd);
}
1572 
1573 /*
1574  * Copy a filedesc structure.  A NULL pointer in returns a NULL reference,
1575  * this is to ease callers, not catch errors.
1576  */
struct filedesc *
fdcopy(struct filedesc *fdp)
{
	struct filedesc *newfdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return (NULL);

	newfdp = fdinit(fdp);
	FILEDESC_SLOCK(fdp);
	/*
	 * Grow the copy until it can hold every descriptor in the source.
	 * fdgrowtable() needs the destination exclusively locked, so the
	 * source lock is dropped each round; re-check after reacquiring.
	 */
	while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
		FILEDESC_SUNLOCK(fdp);
		FILEDESC_XLOCK(newfdp);
		fdgrowtable(newfdp, fdp->fd_lastfile + 1);
		FILEDESC_XUNLOCK(newfdp);
		FILEDESC_SLOCK(fdp);
	}
	/* copy everything except kqueue descriptors */
	newfdp->fd_freefile = -1;
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		/*
		 * NOTE(review): assumes every in-use slot has a non-NULL
		 * file pointer — confirm against fdused() call sites.
		 */
		if (fdisused(fdp, i) &&
		    fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE) {
			newfdp->fd_ofiles[i] = fdp->fd_ofiles[i];
			newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i];
			fhold(newfdp->fd_ofiles[i]);
			newfdp->fd_lastfile = i;
		} else {
			/* Remember the first slot left empty in the copy. */
			if (newfdp->fd_freefile == -1)
				newfdp->fd_freefile = i;
		}
	}
	FILEDESC_SUNLOCK(fdp);
	/* Mark the copied slots as used in the new table's bitmap. */
	FILEDESC_XLOCK(newfdp);
	for (i = 0; i <= newfdp->fd_lastfile; ++i)
		if (newfdp->fd_ofiles[i] != NULL)
			fdused(newfdp, i);
	FILEDESC_XUNLOCK(newfdp);
	FILEDESC_SLOCK(fdp);
	if (newfdp->fd_freefile == -1)
		newfdp->fd_freefile = i;
	newfdp->fd_cmask = fdp->fd_cmask;
	FILEDESC_SUNLOCK(fdp);
	return (newfdp);
}
1623 
1624 /*
1625  * Release a filedesc structure.
1626  */
void
fdfree(struct thread *td)
{
	struct filedesc *fdp;
	struct file **fpp;
	int i, locked;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *cdir, *jdir, *rdir, *vp;
	struct flock lf;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/* Check for special need to clear POSIX style locks */
	fdtol = td->td_proc->p_fdtol;
	if (fdtol != NULL) {
		FILEDESC_XLOCK(fdp);
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		/*
		 * Last leader reference: release every POSIX lock this
		 * leader holds on vnode-backed descriptors.  The table lock
		 * is dropped around VOP_ADVLOCK(), so fpp is recomputed
		 * after each iteration in case the table was reallocated.
		 */
		if (fdtol->fdl_refcount == 1 &&
		    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			for (i = 0, fpp = fdp->fd_ofiles;
			     i <= fdp->fd_lastfile;
			     i++, fpp++) {
				if (*fpp == NULL ||
				    (*fpp)->f_type != DTYPE_VNODE)
					continue;
				fp = *fpp;
				fhold(fp);
				FILEDESC_XUNLOCK(fdp);
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp->f_vnode;
				locked = VFS_LOCK_GIANT(vp->v_mount);
				(void) VOP_ADVLOCK(vp,
						   (caddr_t)td->td_proc->
						   p_leader,
						   F_UNLCK,
						   &lf,
						   F_POSIX);
				VFS_UNLOCK_GIANT(locked);
				FILEDESC_XLOCK(fdp);
				fdrop(fp, td);
				/* Table may have moved while unlocked. */
				fpp = fdp->fd_ofiles + i;
			}
		}
	retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				sx_sleep(&fdp->fd_holdleaderscount,
				    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader remains
				 * valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
				    "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		/* Unlink and free the leader record only on the last drop. */
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else
			fdtol = NULL;
		td->td_proc->p_fdtol = NULL;
		FILEDESC_XUNLOCK(fdp);
		if (fdtol != NULL)
			FREE(fdtol, M_FILEDESC_TO_LEADER);
	}
	FILEDESC_XLOCK(fdp);
	i = --fdp->fd_refcnt;
	FILEDESC_XUNLOCK(fdp);
	if (i > 0)
		return;
	/*
	 * We are the last reference to the structure, so we can
	 * safely assume it will not change out from under us.
	 */
	fpp = fdp->fd_ofiles;
	/*
	 * i only counts iterations (fd_lastfile + 1 of them); fpp walks
	 * the table forward from slot 0.
	 */
	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp)
			(void) closef(*fpp, td);
	}
	FILEDESC_XLOCK(fdp);

	/* XXX This should happen earlier. */
	mtx_lock(&fdesc_mtx);
	td->td_proc->p_fd = NULL;
	mtx_unlock(&fdesc_mtx);

	/* Free heap-allocated tables; the embedded ones go with the struct. */
	if (fdp->fd_nfiles > NDFILE)
		FREE(fdp->fd_ofiles, M_FILEDESC);
	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
		FREE(fdp->fd_map, M_FILEDESC);

	fdp->fd_nfiles = 0;

	/* Detach the directory vnodes under the lock, release them after. */
	cdir = fdp->fd_cdir;
	fdp->fd_cdir = NULL;
	rdir = fdp->fd_rdir;
	fdp->fd_rdir = NULL;
	jdir = fdp->fd_jdir;
	fdp->fd_jdir = NULL;
	FILEDESC_XUNLOCK(fdp);

	if (cdir) {
		locked = VFS_LOCK_GIANT(cdir->v_mount);
		vrele(cdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (rdir) {
		locked = VFS_LOCK_GIANT(rdir->v_mount);
		vrele(rdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (jdir) {
		locked = VFS_LOCK_GIANT(jdir->v_mount);
		vrele(jdir);
		VFS_UNLOCK_GIANT(locked);
	}

	fddrop(fdp);
}
1769 
1770 /*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.  We check for filesystems where
1774  * the vnode can change out from under us after execve (like [lin]procfs).
1775  *
1776  * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1777  * sufficient.  We also don't check for setugidness since we know we are.
1778  */
1779 static int
1780 is_unsafe(struct file *fp)
1781 {
1782 	if (fp->f_type == DTYPE_VNODE) {
1783 		struct vnode *vp = fp->f_vnode;
1784 
1785 		if ((vp->v_vflag & VV_PROCDEP) != 0)
1786 			return (1);
1787 	}
1788 	return (0);
1789 }
1790 
1791 /*
1792  * Make this setguid thing safe, if at all possible.
1793  */
void
setugidsafety(struct thread *td)
{
	struct filedesc *fdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * Note: fdp->fd_ofiles may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	FILEDESC_XLOCK(fdp);
	/* Only descriptors 0..2 matter; see is_unsafe() above. */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
			struct file *fp;

			knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			fdunused(fdp, i);
			/* closef() may sleep, so drop the table lock. */
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_XLOCK(fdp);
		}
	}
	FILEDESC_XUNLOCK(fdp);
}
1832 
1833 /*
1834  * If a specific file object occupies a specific file descriptor, close the
1835  * file descriptor entry and drop a reference on the file object.  This is a
1836  * convenience function to handle a subsequent error in a function that calls
1837  * falloc() that handles the race that another thread might have closed the
1838  * file descriptor out from under the thread creating the file object.
1839  */
1840 void
1841 fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
1842 {
1843 
1844 	FILEDESC_XLOCK(fdp);
1845 	if (fdp->fd_ofiles[idx] == fp) {
1846 		fdp->fd_ofiles[idx] = NULL;
1847 		fdunused(fdp, idx);
1848 		FILEDESC_XUNLOCK(fdp);
1849 		fdrop(fp, td);
1850 	} else
1851 		FILEDESC_XUNLOCK(fdp);
1852 }
1853 
1854 /*
1855  * Close any files on exec?
1856  */
void
fdcloseexec(struct thread *td)
{
	struct filedesc *fdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	FILEDESC_XLOCK(fdp);

	/*
	 * We cannot cache fd_ofiles or fd_ofileflags since operations
	 * may block and rip them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		/* Message queues always close on exec, as do UF_EXCLOSE fds. */
		if (fdp->fd_ofiles[i] != NULL &&
		    (fdp->fd_ofiles[i]->f_type == DTYPE_MQUEUE ||
		    (fdp->fd_ofileflags[i] & UF_EXCLOSE))) {
			struct file *fp;

			knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			fdunused(fdp, i);
			if (fp->f_type == DTYPE_MQUEUE)
				mq_fdclose(td, i, fp);
			/* closef() may sleep, so drop the table lock. */
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_XLOCK(fdp);
		}
	}
	FILEDESC_XUNLOCK(fdp);
}
1898 
1899 /*
1900  * It is unsafe for set[ug]id processes to be started with file
1901  * descriptors 0..2 closed, as these descriptors are given implicit
1902  * significance in the Standard C library.  fdcheckstd() will create a
1903  * descriptor referencing /dev/null for each of stdin, stdout, and
1904  * stderr that is not already open.
1905  */
int
fdcheckstd(struct thread *td)
{
	struct filedesc *fdp;
	register_t retval, save;
	int i, error, devnull;

	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return (0);
	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_ofiles[i] != NULL)
			continue;
		if (devnull < 0) {
			/*
			 * First missing slot: open /dev/null, which lands on
			 * the lowest free fd (exactly i, since 0..i-1 are
			 * occupied).  td_retval is saved/restored because
			 * kern_open() clobbers it with the new fd.
			 */
			save = td->td_retval[0];
			error = kern_open(td, "/dev/null", UIO_SYSSPACE,
			    O_RDWR, 0);
			devnull = td->td_retval[0];
			KASSERT(devnull == i, ("oof, we didn't get our fd"));
			td->td_retval[0] = save;
			if (error)
				break;
		} else {
			/* Later missing slots dup the existing /dev/null fd. */
			error = do_dup(td, DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}
1939 
1940 /*
1941  * Internal form of close.  Decrement reference count on file structure.
1942  * Note: td may be NULL when closing a file that was being passed in a
1943  * message.
1944  *
1945  * XXXRW: Giant is not required for the caller, but often will be held; this
1946  * makes it moderately likely the Giant will be recursed in the VFS case.
1947  */
int
closef(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;
	struct filedesc *fdp;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor, and the thread pointer
	 * will be NULL.  Callers should be careful only to pass a
	 * NULL thread pointer when there really is no owning
	 * context that might have locks, or the locks will be
	 * leaked.
	 */
	if (fp->f_type == DTYPE_VNODE && td != NULL) {
		int vfslocked;

		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			/* Release this leader's POSIX locks on the file. */
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
					   F_UNLCK, &lf, F_POSIX);
		}
		fdtol = td->td_proc->p_fdtol;
		if (fdtol != NULL) {
			/*
			 * Handle special case where file descriptor table is
			 * shared between multiple process leaders.
			 */
			fdp = td->td_proc->p_fd;
			FILEDESC_XLOCK(fdp);
			for (fdtol = fdtol->fdl_next;
			     fdtol != td->td_proc->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				     P_ADVLOCK) == 0)
					continue;
				/*
				 * fdl_holdcount pins fdl_leader while the
				 * lock is dropped around VOP_ADVLOCK();
				 * fdfree() sleeps on it (see fdl_wakeup).
				 */
				fdtol->fdl_holdcount++;
				FILEDESC_XUNLOCK(fdp);
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp->f_vnode;
				(void) VOP_ADVLOCK(vp,
						   (caddr_t)fdtol->fdl_leader,
						   F_UNLCK, &lf, F_POSIX);
				FILEDESC_XLOCK(fdp);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			FILEDESC_XUNLOCK(fdp);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	return (fdrop(fp, td));
}
2019 
2020 /*
2021  * Initialize the file pointer with the specified properties.
2022  *
2023  * The ops are set with release semantics to be certain that the flags, type,
2024  * and data are visible when ops is.  This is to prevent ops methods from being
2025  * called with bad data.
2026  */
void
finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
{
	/*
	 * f_data, f_flag and f_type must be written before f_ops: the
	 * release store on f_ops publishes them, so no ops method can run
	 * against partially-initialized state.  Do not reorder these.
	 */
	fp->f_data = data;
	fp->f_flag = flag;
	fp->f_type = type;
	atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
}
2035 
2036 
2037 /*
2038  * Extract the file pointer associated with the specified descriptor for the
2039  * current user process.
2040  *
2041  * If the descriptor doesn't exist, EBADF is returned.
2042  *
2043  * If the descriptor exists but doesn't match 'flags' then return EBADF for
2044  * read attempts and EINVAL for write attempts.
2045  *
2046  * If 'hold' is set (non-zero) the file's refcount will be bumped on return.
2047  * It should be dropped with fdrop().  If it is not set, then the refcount
2048  * will not be bumped however the thread's filedesc struct will be returned
2049  * locked (for fgetsock).
2050  *
 * If an error occurred the non-zero error is returned and *fpp is set to
2052  * NULL.  Otherwise *fpp is set and zero is returned.
2053  */
static __inline int
_fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
{
	struct filedesc *fdp;
	struct file *fp;

	*fpp = NULL;
	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
		return (EBADF);
	FILEDESC_SLOCK(fdp);
	/* badfileops means the file was allocated but never initialized. */
	if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}

	/*
	 * FREAD and FWRITE failure return EBADF as per POSIX.
	 *
	 * Only one flag, or 0, may be specified.
	 */
	if (flags == FREAD && (fp->f_flag & FREAD) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	/*
	 * On success with hold == 0, the shared lock is deliberately left
	 * held; the caller must release it (see _fgetvp()/fgetsock()).
	 */
	if (hold) {
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
	}
	*fpp = fp;
	return (0);
}
2089 
int
fget(struct thread *td, int fd, struct file **fpp)
{

	/* Hold a reference; no FREAD/FWRITE access check. */
	return (_fget(td, fd, fpp, 0, 1));
}
2096 
2097 int
2098 fget_read(struct thread *td, int fd, struct file **fpp)
2099 {
2100 
2101 	return(_fget(td, fd, fpp, FREAD, 1));
2102 }
2103 
2104 int
2105 fget_write(struct thread *td, int fd, struct file **fpp)
2106 {
2107 
2108 	return(_fget(td, fd, fpp, FWRITE, 1));
2109 }
2110 
2111 /*
2112  * Like fget() but loads the underlying vnode, or returns an error if the
2113  * descriptor does not represent a vnode.  Note that pipes use vnodes but
2114  * never have VM objects.  The returned vnode will be vref()'d.
2115  *
2116  * XXX: what about the unused flags ?
2117  */
2118 static __inline int
2119 _fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags)
2120 {
2121 	struct file *fp;
2122 	int error;
2123 
2124 	*vpp = NULL;
2125 	if ((error = _fget(td, fd, &fp, flags, 0)) != 0)
2126 		return (error);
2127 	if (fp->f_vnode == NULL) {
2128 		error = EINVAL;
2129 	} else {
2130 		*vpp = fp->f_vnode;
2131 		vref(*vpp);
2132 	}
2133 	FILEDESC_SUNLOCK(td->td_proc->p_fd);
2134 	return (error);
2135 }
2136 
int
fgetvp(struct thread *td, int fd, struct vnode **vpp)
{

	/* Vnode lookup with no access-mode check. */
	return (_fgetvp(td, fd, vpp, 0));
}
2143 
2144 int
2145 fgetvp_read(struct thread *td, int fd, struct vnode **vpp)
2146 {
2147 
2148 	return (_fgetvp(td, fd, vpp, FREAD));
2149 }
2150 
2151 #ifdef notyet
2152 int
2153 fgetvp_write(struct thread *td, int fd, struct vnode **vpp)
2154 {
2155 
2156 	return (_fgetvp(td, fd, vpp, FWRITE));
2157 }
2158 #endif
2159 
2160 /*
2161  * Like fget() but loads the underlying socket, or returns an error if the
2162  * descriptor does not represent a socket.
2163  *
2164  * We bump the ref count on the returned socket.  XXX Also obtain the SX lock
2165  * in the future.
2166  *
2167  * XXXRW: fgetsock() and fputsock() are deprecated, as consumers should rely
2168  * on their file descriptor reference to prevent the socket from being free'd
2169  * during use.
2170  */
int
fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp)
{
	struct file *fp;
	int error;

	*spp = NULL;
	if (fflagp != NULL)
		*fflagp = 0;
	if ((error = _fget(td, fd, &fp, 0, 0)) != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
	} else {
		*spp = fp->f_data;
		if (fflagp)
			*fflagp = fp->f_flag;
		/* Take a socket reference before the filedesc is unlocked. */
		SOCK_LOCK(*spp);
		soref(*spp);
		SOCK_UNLOCK(*spp);
	}
	/* _fget() with hold == 0 left the filedesc shared-locked. */
	FILEDESC_SUNLOCK(td->td_proc->p_fd);
	return (error);
}
2195 
2196 /*
2197  * Drop the reference count on the socket and XXX release the SX lock in the
2198  * future.  The last reference closes the socket.
2199  *
2200  * XXXRW: fputsock() is deprecated, see comment for fgetsock().
2201  */
void
fputsock(struct socket *so)
{

	/*
	 * NOTE(review): no explicit unlocks here — sorele() is expected to
	 * consume the reference and release both the socket lock and the
	 * accept lock; confirm against sorele()'s contract.
	 */
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sorele(so);
}
2210 
2211 /*
2212  * Handle the last reference to a file being closed.
2213  */
int
_fdrop(struct file *fp, struct thread *td)
{
	int error;

	error = 0;
	/* Caller must only invoke this once the count has hit zero. */
	if (fp->f_count != 0)
		panic("fdrop: count %d", fp->f_count);
	/* Skip fo_close() for files that were never fully initialized. */
	if (fp->f_ops != &badfileops)
		error = fo_close(fp, td);
	atomic_subtract_int(&openfiles, 1);
	crfree(fp->f_cred);
	uma_zfree(file_zone, fp);

	return (error);
}
2230 
2231 /*
2232  * Apply an advisory lock on a file descriptor.
2233  *
2234  * Just attempt to get a record lock of the requested type on the entire file
2235  * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2236  */
2237 #ifndef _SYS_SYSPROTO_H_
2238 struct flock_args {
2239 	int	fd;
2240 	int	how;
2241 };
2242 #endif
2243 /* ARGSUSED */
int
flock(struct thread *td, struct flock_args *uap)
{
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	/* flock(2) only works on vnode-backed descriptors. */
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp, td);
		return (EOPNOTSUPP);
	}

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/* Always lock the whole file, per the flock(2) contract. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK);
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
		goto done2;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done2;
	}
	atomic_set_int(&fp->f_flag, FHASLOCK);
	/* LOCK_NB requests a non-blocking attempt. */
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
done2:
	fdrop(fp, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
2287 /*
2288  * Duplicate the specified descriptor to a free descriptor.
2289  */
int
dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error)
{
	struct file *wfp;
	struct file *fp;

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, then reject.
	 */
	FILEDESC_XLOCK(fdp);
	if (dfd < 0 || dfd >= fdp->fd_nfiles ||
	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and store it in
	 * (indx).  (dfd) is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			FILEDESC_XUNLOCK(fdp);
			return (EACCES);
		}
		/* Replace whatever occupied indx with an extra ref on wfp. */
		fp = fdp->fd_ofiles[indx];
		fdp->fd_ofiles[indx] = wfp;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		if (fp == NULL)
			fdused(fdp, indx);
		fhold(wfp);
		FILEDESC_XUNLOCK(fdp);
		if (fp != NULL)
			/*
			 * We now own the reference to fp that the ofiles[]
			 * array used to own.  Release it.
			 */
			fdrop(fp, td);
		return (0);

	case ENXIO:
		/*
		 * Steal away the file pointer from dfd and stuff it into indx.
		 */
		fp = fdp->fd_ofiles[indx];
		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
		fdp->fd_ofiles[dfd] = NULL;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fdp->fd_ofileflags[dfd] = 0;
		fdunused(fdp, dfd);
		if (fp == NULL)
			fdused(fdp, indx);
		FILEDESC_XUNLOCK(fdp);

		/*
		 * We now own the reference to fp that the ofiles[] array
		 * used to own.  Release it.
		 */
		if (fp != NULL)
			fdrop(fp, td);
		return (0);

	default:
		FILEDESC_XUNLOCK(fdp);
		return (error);
	}
	/* NOTREACHED */
}
2371 
2372 /*
2373  * Scan all active processes to see if any of them have a current or root
2374  * directory of `olddp'. If so, replace them with the new mount point.
2375  */
2376 void
2377 mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
2378 {
2379 	struct filedesc *fdp;
2380 	struct proc *p;
2381 	int nrele;
2382 
2383 	if (vrefcnt(olddp) == 1)
2384 		return;
2385 	sx_slock(&allproc_lock);
2386 	FOREACH_PROC_IN_SYSTEM(p) {
2387 		fdp = fdhold(p);
2388 		if (fdp == NULL)
2389 			continue;
2390 		nrele = 0;
2391 		FILEDESC_XLOCK(fdp);
2392 		if (fdp->fd_cdir == olddp) {
2393 			vref(newdp);
2394 			fdp->fd_cdir = newdp;
2395 			nrele++;
2396 		}
2397 		if (fdp->fd_rdir == olddp) {
2398 			vref(newdp);
2399 			fdp->fd_rdir = newdp;
2400 			nrele++;
2401 		}
2402 		FILEDESC_XUNLOCK(fdp);
2403 		fddrop(fdp);
2404 		while (nrele--)
2405 			vrele(olddp);
2406 	}
2407 	sx_sunlock(&allproc_lock);
2408 	if (rootvnode == olddp) {
2409 		vrele(rootvnode);
2410 		vref(newdp);
2411 		rootvnode = newdp;
2412 	}
2413 }
2414 
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
{
	/*
	 * Allocate a filedesc-to-leader tracking structure for 'leader' and
	 * link it into the circular list headed by 'old' (if any); otherwise
	 * start a new singleton list.  The list is protected by fdp's lock.
	 */
	struct filedesc_to_leader *fdtol;

	MALLOC(fdtol, struct filedesc_to_leader *,
	       sizeof(struct filedesc_to_leader),
	       M_FILEDESC_TO_LEADER,
	       M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		/* Splice the new entry in immediately after 'old'. */
		FILEDESC_XLOCK(fdp);
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
		FILEDESC_XUNLOCK(fdp);
	} else {
		/* Empty list: the entry points at itself in both directions. */
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return (fdtol);
}
2441 
2442 /*
2443  * Get file structures globally.
2444  */
2445 static int
2446 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2447 {
2448 	struct xfile xf;
2449 	struct filedesc *fdp;
2450 	struct file *fp;
2451 	struct proc *p;
2452 	int error, n;
2453 
2454 	error = sysctl_wire_old_buffer(req, 0);
2455 	if (error != 0)
2456 		return (error);
2457 	if (req->oldptr == NULL) {
2458 		n = 0;
2459 		sx_slock(&allproc_lock);
2460 		FOREACH_PROC_IN_SYSTEM(p) {
2461 			if (p->p_state == PRS_NEW)
2462 				continue;
2463 			fdp = fdhold(p);
2464 			if (fdp == NULL)
2465 				continue;
2466 			/* overestimates sparse tables. */
2467 			if (fdp->fd_lastfile > 0)
2468 				n += fdp->fd_lastfile;
2469 			fddrop(fdp);
2470 		}
2471 		sx_sunlock(&allproc_lock);
2472 		return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
2473 	}
2474 	error = 0;
2475 	bzero(&xf, sizeof(xf));
2476 	xf.xf_size = sizeof(xf);
2477 	sx_slock(&allproc_lock);
2478 	FOREACH_PROC_IN_SYSTEM(p) {
2479 		if (p->p_state == PRS_NEW)
2480 			continue;
2481 		PROC_LOCK(p);
2482 		if (p_cansee(req->td, p) != 0) {
2483 			PROC_UNLOCK(p);
2484 			continue;
2485 		}
2486 		xf.xf_pid = p->p_pid;
2487 		xf.xf_uid = p->p_ucred->cr_uid;
2488 		PROC_UNLOCK(p);
2489 		fdp = fdhold(p);
2490 		if (fdp == NULL)
2491 			continue;
2492 		FILEDESC_SLOCK(fdp);
2493 		for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) {
2494 			if ((fp = fdp->fd_ofiles[n]) == NULL)
2495 				continue;
2496 			xf.xf_fd = n;
2497 			xf.xf_file = fp;
2498 			xf.xf_data = fp->f_data;
2499 			xf.xf_vnode = fp->f_vnode;
2500 			xf.xf_type = fp->f_type;
2501 			xf.xf_count = fp->f_count;
2502 			xf.xf_msgcount = 0;
2503 			xf.xf_offset = fp->f_offset;
2504 			xf.xf_flag = fp->f_flag;
2505 			error = SYSCTL_OUT(req, &xf, sizeof(xf));
2506 			if (error)
2507 				break;
2508 		}
2509 		FILEDESC_SUNLOCK(fdp);
2510 		fddrop(fdp);
2511 		if (error)
2512 			break;
2513 	}
2514 	sx_sunlock(&allproc_lock);
2515 	return (error);
2516 }
2517 
2518 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2519     0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
2520 
/*
 * Emit a kinfo_file record for a directory vnode (cwd/root/jail pseudo
 * descriptors) on behalf of sysctl_kern_proc_filedesc().
 *
 * Caller holds fdp's shared lock; this function temporarily DROPS and
 * re-acquires it around the vnode operations, so the caller must
 * re-validate any cached filedesc state afterwards.
 */
static int
export_vnode_for_sysctl(struct vnode *vp, int type,
    struct kinfo_file *kif, struct filedesc *fdp, struct sysctl_req *req)
{
	int error;
	char *fullpath, *freepath;
	int vfslocked;

	bzero(kif, sizeof(*kif));
	kif->kf_structsize = sizeof(*kif);

	/* Hold the vnode across the unlocked window; vput() releases it. */
	vref(vp);
	kif->kf_fd = type;
	kif->kf_type = KF_TYPE_VNODE;
	/* This function only handles directories. */
	KASSERT(vp->v_type == VDIR, ("export_vnode_for_sysctl: vnode not directory"));
	kif->kf_vnode_type = KF_VTYPE_VDIR;

	/*
	 * This is not a true file descriptor, so we set a bogus refcount
	 * and offset to indicate these fields should be ignored.
	 */
	kif->kf_ref_count = -1;
	kif->kf_offset = -1;

	freepath = NULL;
	fullpath = "-";
	FILEDESC_SUNLOCK(fdp);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vn_fullpath(curthread, vp, &fullpath, &freepath);
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
	if (freepath != NULL)
		free(freepath, M_TEMP);
	error = SYSCTL_OUT(req, kif, sizeof(*kif));
	FILEDESC_SLOCK(fdp);
	return (error);
}
2561 
2562 /*
2563  * Get per-process file descriptors for use by procstat(1), et al.
2564  */
2565 static int
2566 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
2567 {
2568 	char *fullpath, *freepath;
2569 	struct kinfo_file *kif;
2570 	struct filedesc *fdp;
2571 	int error, i, *name;
2572 	struct socket *so;
2573 	struct vnode *vp;
2574 	struct file *fp;
2575 	struct proc *p;
2576 	int vfslocked;
2577 
2578 	name = (int *)arg1;
2579 	if ((p = pfind((pid_t)name[0])) == NULL)
2580 		return (ESRCH);
2581 	if ((error = p_candebug(curthread, p))) {
2582 		PROC_UNLOCK(p);
2583 		return (error);
2584 	}
2585 	fdp = fdhold(p);
2586 	PROC_UNLOCK(p);
2587 	kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
2588 	FILEDESC_SLOCK(fdp);
2589 	if (fdp->fd_cdir != NULL)
2590 		export_vnode_for_sysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
2591 				fdp, req);
2592 	if (fdp->fd_rdir != NULL)
2593 		export_vnode_for_sysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
2594 				fdp, req);
2595 	if (fdp->fd_jdir != NULL)
2596 		export_vnode_for_sysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
2597 				fdp, req);
2598 	for (i = 0; i < fdp->fd_nfiles; i++) {
2599 		if ((fp = fdp->fd_ofiles[i]) == NULL)
2600 			continue;
2601 		bzero(kif, sizeof(*kif));
2602 		kif->kf_structsize = sizeof(*kif);
2603 		vp = NULL;
2604 		so = NULL;
2605 		kif->kf_fd = i;
2606 		switch (fp->f_type) {
2607 		case DTYPE_VNODE:
2608 			kif->kf_type = KF_TYPE_VNODE;
2609 			vp = fp->f_vnode;
2610 			break;
2611 
2612 		case DTYPE_SOCKET:
2613 			kif->kf_type = KF_TYPE_SOCKET;
2614 			so = fp->f_data;
2615 			break;
2616 
2617 		case DTYPE_PIPE:
2618 			kif->kf_type = KF_TYPE_PIPE;
2619 			break;
2620 
2621 		case DTYPE_FIFO:
2622 			kif->kf_type = KF_TYPE_FIFO;
2623 			vp = fp->f_vnode;
2624 			vref(vp);
2625 			break;
2626 
2627 		case DTYPE_KQUEUE:
2628 			kif->kf_type = KF_TYPE_KQUEUE;
2629 			break;
2630 
2631 		case DTYPE_CRYPTO:
2632 			kif->kf_type = KF_TYPE_CRYPTO;
2633 			break;
2634 
2635 		case DTYPE_MQUEUE:
2636 			kif->kf_type = KF_TYPE_MQUEUE;
2637 			break;
2638 
2639 		case DTYPE_SHM:
2640 			kif->kf_type = KF_TYPE_SHM;
2641 			break;
2642 
2643 		default:
2644 			kif->kf_type = KF_TYPE_UNKNOWN;
2645 			break;
2646 		}
2647 		kif->kf_ref_count = fp->f_count;
2648 		if (fp->f_flag & FREAD)
2649 			kif->kf_flags |= KF_FLAG_READ;
2650 		if (fp->f_flag & FWRITE)
2651 			kif->kf_flags |= KF_FLAG_WRITE;
2652 		if (fp->f_flag & FAPPEND)
2653 			kif->kf_flags |= KF_FLAG_APPEND;
2654 		if (fp->f_flag & FASYNC)
2655 			kif->kf_flags |= KF_FLAG_ASYNC;
2656 		if (fp->f_flag & FFSYNC)
2657 			kif->kf_flags |= KF_FLAG_FSYNC;
2658 		if (fp->f_flag & FNONBLOCK)
2659 			kif->kf_flags |= KF_FLAG_NONBLOCK;
2660 		if (fp->f_flag & O_DIRECT)
2661 			kif->kf_flags |= KF_FLAG_DIRECT;
2662 		if (fp->f_flag & FHASLOCK)
2663 			kif->kf_flags |= KF_FLAG_HASLOCK;
2664 		kif->kf_offset = fp->f_offset;
2665 		if (vp != NULL) {
2666 			vref(vp);
2667 			switch (vp->v_type) {
2668 			case VNON:
2669 				kif->kf_vnode_type = KF_VTYPE_VNON;
2670 				break;
2671 			case VREG:
2672 				kif->kf_vnode_type = KF_VTYPE_VREG;
2673 				break;
2674 			case VDIR:
2675 				kif->kf_vnode_type = KF_VTYPE_VDIR;
2676 				break;
2677 			case VBLK:
2678 				kif->kf_vnode_type = KF_VTYPE_VBLK;
2679 				break;
2680 			case VCHR:
2681 				kif->kf_vnode_type = KF_VTYPE_VCHR;
2682 				break;
2683 			case VLNK:
2684 				kif->kf_vnode_type = KF_VTYPE_VLNK;
2685 				break;
2686 			case VSOCK:
2687 				kif->kf_vnode_type = KF_VTYPE_VSOCK;
2688 				break;
2689 			case VFIFO:
2690 				kif->kf_vnode_type = KF_VTYPE_VFIFO;
2691 				break;
2692 			case VBAD:
2693 				kif->kf_vnode_type = KF_VTYPE_VBAD;
2694 				break;
2695 			default:
2696 				kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
2697 				break;
2698 			}
2699 			/*
2700 			 * It is OK to drop the filedesc lock here as we will
2701 			 * re-validate and re-evaluate its properties when
2702 			 * the loop continues.
2703 			 */
2704 			freepath = NULL;
2705 			fullpath = "-";
2706 			FILEDESC_SUNLOCK(fdp);
2707 			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
2708 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2709 			vn_fullpath(curthread, vp, &fullpath, &freepath);
2710 			vput(vp);
2711 			VFS_UNLOCK_GIANT(vfslocked);
2712 			strlcpy(kif->kf_path, fullpath,
2713 			    sizeof(kif->kf_path));
2714 			if (freepath != NULL)
2715 				free(freepath, M_TEMP);
2716 			FILEDESC_SLOCK(fdp);
2717 		}
2718 		if (so != NULL) {
2719 			struct sockaddr *sa;
2720 
2721 			if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
2722 			    == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
2723 				bcopy(sa, &kif->kf_sa_local, sa->sa_len);
2724 				free(sa, M_SONAME);
2725 			}
2726 			if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
2727 			    == 00 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
2728 				bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
2729 				free(sa, M_SONAME);
2730 			}
2731 			kif->kf_sock_domain =
2732 			    so->so_proto->pr_domain->dom_family;
2733 			kif->kf_sock_type = so->so_type;
2734 			kif->kf_sock_protocol = so->so_proto->pr_protocol;
2735 		}
2736 		error = SYSCTL_OUT(req, kif, sizeof(*kif));
2737 		if (error)
2738 			break;
2739 	}
2740 	FILEDESC_SUNLOCK(fdp);
2741 	fddrop(fdp);
2742 	free(kif, M_TEMP);
2743 	return (0);
2744 }
2745 
/* Attach the handler as kern.proc.filedesc.<pid>. */
static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD,
    sysctl_kern_proc_filedesc, "Process filedesc entries");
2748 
2749 #ifdef DDB
2750 /*
2751  * For the purposes of debugging, generate a human-readable string for the
2752  * file type.
2753  */
2754 static const char *
2755 file_type_to_name(short type)
2756 {
2757 
2758 	switch (type) {
2759 	case 0:
2760 		return ("zero");
2761 	case DTYPE_VNODE:
2762 		return ("vnod");
2763 	case DTYPE_SOCKET:
2764 		return ("sock");
2765 	case DTYPE_PIPE:
2766 		return ("pipe");
2767 	case DTYPE_FIFO:
2768 		return ("fifo");
2769 	case DTYPE_KQUEUE:
2770 		return ("kque");
2771 	case DTYPE_CRYPTO:
2772 		return ("crpt");
2773 	case DTYPE_MQUEUE:
2774 		return ("mque");
2775 	case DTYPE_SHM:
2776 		return ("shm");
2777 	default:
2778 		return ("unkn");
2779 	}
2780 }
2781 
2782 /*
2783  * For the purposes of debugging, identify a process (if any, perhaps one of
2784  * many) that references the passed file in its file descriptor array. Return
2785  * NULL if none.
2786  */
2787 static struct proc *
2788 file_to_first_proc(struct file *fp)
2789 {
2790 	struct filedesc *fdp;
2791 	struct proc *p;
2792 	int n;
2793 
2794 	FOREACH_PROC_IN_SYSTEM(p) {
2795 		if (p->p_state == PRS_NEW)
2796 			continue;
2797 		fdp = p->p_fd;
2798 		if (fdp == NULL)
2799 			continue;
2800 		for (n = 0; n < fdp->fd_nfiles; n++) {
2801 			if (fp == fdp->fd_ofiles[n])
2802 				return (p);
2803 		}
2804 	}
2805 	return (NULL);
2806 }
2807 
/*
 * DDB helper: print one formatted table row (optionally preceded by a
 * header) describing 'fp', including a process that references it, if any.
 */
static void
db_print_file(struct file *fp, int header)
{
	struct proc *p;

	if (header)
		db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
		    "File", "Type", "Data", "Flag", "GCFl", "Count",
		    "MCount", "Vnode", "FPID", "FCmd");
	p = file_to_first_proc(fp);
	/* GCFl and MCount columns are always printed as zero here. */
	db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
	    file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
	    0, fp->f_count, 0, fp->f_vnode,
	    p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
}
2823 
/* DDB "show file <addr>": dump the struct file at the given address. */
DB_SHOW_COMMAND(file, db_show_file)
{
	struct file *fp;

	if (!have_addr) {
		db_printf("usage: show file <addr>\n");
		return;
	}
	fp = (struct file *)addr;
	db_print_file(fp, 1);
}
2835 
/* DDB "show files": dump every open file of every process. */
DB_SHOW_COMMAND(files, db_show_files)
{
	struct filedesc *fdp;
	struct file *fp;
	struct proc *p;
	int header;
	int n;

	/* Print the column header once, before the first row. */
	header = 1;
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_state == PRS_NEW)
			continue;
		if ((fdp = p->p_fd) == NULL)
			continue;
		for (n = 0; n < fdp->fd_nfiles; ++n) {
			if ((fp = fdp->fd_ofiles[n]) == NULL)
				continue;
			db_print_file(fp, header);
			header = 0;
		}
	}
}
2858 #endif
2859 
/* Tunables and counters exported under the kern.* sysctl tree. */
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
2868 
/* ARGSUSED*/
/*
 * One-time initialization: create the UMA zone for struct file and the
 * sigio and fdesc mutexes.  Run at SI_SUB_LOCK via SYSINIT below.
 */
static void
filelistinit(void *dummy)
{

	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
	mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
}
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
2880 
2881 /*-------------------------------------------------------------------*/
2882 
/*
 * "Bad file" operations: stubs installed (via badfileops) on descriptors
 * that are not fully set up or have been revoked, so every operation
 * fails predictably instead of dereferencing stale state.
 */
static int
badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td)
{

	return (EBADF);
}

static int
badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td)
{

	return (EINVAL);
}

static int
badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td)
{

	return (EBADF);
}

/* Polling a bad descriptor reports no events rather than an error. */
static int
badfo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td)
{

	return (0);
}

static int
badfo_kqfilter(struct file *fp, struct knote *kn)
{

	return (EBADF);
}

static int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td)
{

	return (EBADF);
}

static int
badfo_close(struct file *fp, struct thread *td)
{

	return (EBADF);
}

/* fileops vector wiring the stubs above into a struct file. */
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_truncate = badfo_truncate,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
};
2942 
2943 
2944 /*-------------------------------------------------------------------*/
2945 
2946 /*
2947  * File Descriptor pseudo-device driver (/dev/fd/).
2948  *
2949  * Opening minor device N dup()s the file (if any) connected to file
2950  * descriptor N belonging to the calling process.  Note that this driver
2951  * consists of only the ``open()'' routine, because all subsequent
2952  * references to this file will be direct to the other driver.
2953  *
2954  * XXX: we could give this one a cloning event handler if necessary.
2955  */
2956 
/* ARGSUSED */
/*
 * Open routine for the /dev/fd/N pseudo-devices; see the block comment
 * above.  Never succeeds: it stashes the minor number in td_dupfd and
 * returns ENODEV so that the open path calls dupfdopen() instead.
 */
static int
fdopen(struct cdev *dev, int mode, int type, struct thread *td)
{

	/*
	 * XXX Kludge: set curthread->td_dupfd to contain the value of the
	 * the file descriptor being sought for duplication. The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open. Open will detect this special error and take the
	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_dupfd = dev2unit(dev);
	return (ENODEV);
}
2973 
/* Character-device switch for /dev/fd/*: only open is implemented. */
static struct cdevsw fildesc_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	fdopen,
	.d_name =	"FD",
};
2980 
/*
 * Create /dev/fd/0-2 and the conventional stdin/stdout/stderr aliases.
 * Run at driver-initialization time via the SYSINIT below.
 */
static void
fildesc_drvinit(void *unused)
{
	struct cdev *dev;

	dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
2995