1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2015, Joyent Inc.
25 * Copyright 2024 Oxide Computer Company
26 */
27
28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 /* All Rights Reserved */
30
31 #include <sys/types.h>
32 #include <sys/sysmacros.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/errno.h>
36 #include <sys/signal.h>
37 #include <sys/cred.h>
38 #include <sys/user.h>
39 #include <sys/conf.h>
40 #include <sys/vfs.h>
41 #include <sys/vnode.h>
42 #include <sys/pathname.h>
43 #include <sys/file.h>
44 #include <sys/flock.h>
45 #include <sys/proc.h>
46 #include <sys/var.h>
47 #include <sys/cpuvar.h>
48 #include <sys/open.h>
49 #include <sys/cmn_err.h>
50 #include <sys/priocntl.h>
51 #include <sys/procset.h>
52 #include <sys/prsystm.h>
53 #include <sys/debug.h>
54 #include <sys/kmem.h>
55 #include <sys/atomic.h>
56 #include <sys/fcntl.h>
57 #include <sys/poll.h>
58 #include <sys/rctl.h>
59 #include <sys/port_impl.h>
60 #include <sys/dtrace.h>
61 #include <sys/stdbool.h>
62
63 #include <c2/audit.h>
64 #include <sys/nbmlock.h>
65
66 #ifdef DEBUG
67
68 static uint32_t afd_maxfd; /* # of entries in maximum allocated array */
69 static uint32_t afd_alloc; /* count of kmem_alloc()s */
70 static uint32_t afd_free; /* count of kmem_free()s */
71 static uint32_t afd_wait; /* count of waits on non-zero ref count */
72 #define MAXFD(x) (afd_maxfd = ((afd_maxfd >= (x))? afd_maxfd : (x)))
73 #define COUNT(x) atomic_inc_32(&x)
74
75 #else /* DEBUG */
76
77 #define MAXFD(x)
78 #define COUNT(x)
79
80 #endif /* DEBUG */
81
82 kmem_cache_t *file_cache;
83
84 static void port_close_fd(portfd_t *);
85
86 /*
87 * File descriptor allocation.
88 *
89 * fd_find(fip, minfd) finds the first available descriptor >= minfd.
90 * The most common case is open(2), in which minfd = 0, but we must also
91 * support fcntl(fd, F_DUPFD, minfd).
92 *
93 * The algorithm is as follows: we keep all file descriptors in an infix
94 * binary tree in which each node records the number of descriptors
95 * allocated in its right subtree, including itself. Starting at minfd,
96 * we ascend the tree until we find a non-fully allocated right subtree.
97 * We then descend that subtree in a binary search for the smallest fd.
98 * Finally, we ascend the tree again to increment the allocation count
99 * of every subtree containing the newly-allocated fd. Freeing an fd
100 * requires only the last step: we ascend the tree to decrement allocation
101 * counts. Each of these three steps (ascent to find non-full subtree,
102 * descent to find lowest fd, ascent to update allocation counts) is
103 * O(log n), thus the algorithm as a whole is O(log n).
104 *
105 * We don't implement the fd tree using the customary left/right/parent
106 * pointers, but instead take advantage of the glorious mathematics of
107 * full infix binary trees. For reference, here's an illustration of the
108 * logical structure of such a tree, rooted at 4 (binary 100), covering
109 * the range 1-7 (binary 001-111). Our canonical trees do not include
110 * fd 0; we'll deal with that later.
111 *
112 * 100
113 * / \
114 * / \
115 * 010 110
116 * / \ / \
117 * 001 011 101 111
118 *
119 * We make the following observations, all of which are easily proven by
120 * induction on the depth of the tree:
121 *
122 * (T1) The least-significant bit (LSB) of any node is equal to its level
123 * in the tree. In our example, nodes 001, 011, 101 and 111 are at
124 * level 0; nodes 010 and 110 are at level 1; and node 100 is at level 2.
125 *
126 * (T2) The child size (CSIZE) of node N -- that is, the total number of
127 * right-branch descendants in a child of node N, including itself -- is
128 * given by clearing all but the least significant bit of N. This
129 * follows immediately from (T1). Applying this rule to our example, we
130 * see that CSIZE(100) = 100, CSIZE(x10) = 10, and CSIZE(xx1) = 1.
131 *
132 * (T3) The nearest left ancestor (LPARENT) of node N -- that is, the nearest
133 * ancestor containing node N in its right child -- is given by clearing
134 * the LSB of N. For example, LPARENT(111) = 110 and LPARENT(110) = 100.
135 * Clearing the LSB of nodes 001, 010 or 100 yields zero, reflecting
136 * the fact that these are leftmost nodes. Note that this algorithm
137 * automatically skips generations as necessary. For example, the parent
138 * of node 101 is 110, which is a *right* ancestor (not what we want);
139 * but its grandparent is 100, which is a left ancestor. Clearing the LSB
140 * of 101 gets us to 100 directly, skipping right past the uninteresting
141 * generation (110).
142 *
143 * Note that since LPARENT clears the LSB, whereas CSIZE clears all *but*
144 * the LSB, we can express LPARENT() nicely in terms of CSIZE():
145 *
146 * LPARENT(N) = N - CSIZE(N)
147 *
148 * (T4) The nearest right ancestor (RPARENT) of node N is given by:
149 *
150 * RPARENT(N) = N + CSIZE(N)
151 *
152 * (T5) For every interior node, the children differ from their parent by
153 * CSIZE(parent) / 2. In our example, CSIZE(100) / 2 = 2 = 10 binary,
154 * and indeed, the children of 100 are 100 +/- 10 = 010 and 110.
155 *
156 * Next, we'll need a few two's-complement math tricks. Suppose a number,
157 * N, has the following form:
158 *
159 * N = xxxx10...0
160 *
161 * That is, the binary representation of N consists of some string of bits,
162 * then a 1, then all zeroes. This amounts to nothing more than saying that
163 * N has a least-significant bit, which is true for any N != 0. If we look
164 * at N and N - 1 together, we see that we can combine them in useful ways:
165 *
166 * N = xxxx10...0
167 * N - 1 = xxxx01...1
168 * ------------------------
169 * N & (N - 1) = xxxx000000
170 * N | (N - 1) = xxxx111111
171 * N ^ (N - 1) = 111111
172 *
173 * In particular, this suggests several easy ways to clear all but the LSB,
174 * which by (T2) is exactly what we need to determine CSIZE(N) = 10...0.
175 * We'll opt for this formulation:
176 *
177 * (C1) CSIZE(N) = (N - 1) ^ (N | (N - 1))
178 *
179 * Similarly, we have an easy way to determine LPARENT(N), which requires
180 * that we clear the LSB of N:
181 *
182 * (L1) LPARENT(N) = N & (N - 1)
183 *
184 * We note in the above relations that (N | (N - 1)) - N = CSIZE(N) - 1.
185 * When combined with (T4), this yields an easy way to compute RPARENT(N):
186 *
187 * (R1) RPARENT(N) = (N | (N - 1)) + 1
188 *
189 * Finally, to accommodate fd 0 we must adjust all of our results by +/-1 to
190 * move the fd range from [1, 2^n) to [0, 2^n - 1). This is straightforward,
191 * so there's no need to belabor the algebra; the revised relations become:
192 *
193 * (C1a) CSIZE(N) = N ^ (N | (N + 1))
194 *
195 * (L1a) LPARENT(N) = (N & (N + 1)) - 1
196 *
197 * (R1a) RPARENT(N) = N | (N + 1)
198 *
199 * This completes the mathematical framework. We now have all the tools
200 * we need to implement fd_find() and fd_reserve().
201 *
202 * fd_find(fip, minfd) finds the smallest available file descriptor >= minfd.
203 * It does not actually allocate the descriptor; that's done by fd_reserve().
204 * fd_find() proceeds in two steps:
205 *
206 * (1) Find the leftmost subtree that contains a descriptor >= minfd.
207 * We start at the right subtree rooted at minfd. If this subtree is
208 * not full -- if fip->fi_list[minfd].uf_alloc != CSIZE(minfd) -- then
209 * step 1 is done. Otherwise, we know that all fds in this subtree
210 * are taken, so we ascend to RPARENT(minfd) using (R1a). We repeat
211 * this process until we either find a candidate subtree or exceed
212 * fip->fi_nfiles. We use (C1a) to compute CSIZE().
213 *
214 * (2) Find the smallest fd in the subtree discovered by step 1.
215 * Starting at the root of this subtree, we descend to find the
216 * smallest available fd. Since the left children have the smaller
217 * fds, we will descend rightward only when the left child is full.
218 *
219 * We begin by comparing the number of allocated fds in the root
220 * to the number of allocated fds in its right child; if they differ
221 * by exactly CSIZE(child), we know the left subtree is full, so we
222 * descend right; that is, the right child becomes the search root.
223 * Otherwise we leave the root alone and start following the right
224 * child's left children. As fortune would have it, this is very
225 * simple computationally: by (T5), the right child of fd is just
226 * fd + size, where size = CSIZE(fd) / 2. Applying (T5) again,
227 * we find that the right child's left child is fd + size - (size / 2) =
228 * fd + (size / 2); *its* left child is fd + (size / 2) - (size / 4) =
229 * fd + (size / 4), and so on. In general, fd's right child's
230 * leftmost nth descendant is fd + (size >> n). Thus, to follow
231 * the right child's left descendants, we just halve the size in
232 * each iteration of the search.
233 *
234 * When we descend leftward, we must keep track of the number of fds
235 * that were allocated in all the right subtrees we rejected, so we
236 * know how many of the root fd's allocations are in the remaining
237 * (as yet unexplored) leftmost part of its right subtree. When we
238 * encounter a fully-allocated left child -- that is, when we find
239 * that fip->fi_list[fd].uf_alloc == ralloc + size -- we descend right
240 * (as described earlier), resetting ralloc to zero.
241 *
242 * fd_reserve(fip, fd, incr) either allocates or frees fd, depending
243 * on whether incr is 1 or -1. Starting at fd, fd_reserve() ascends
244 * the leftmost ancestors (see (T3)) and updates the allocation counts.
245 * At each step we use (L1a) to compute LPARENT(), the next left ancestor.
246 *
247 * flist_minsize() finds the minimal tree that still covers all
248 * used fds; as long as the allocation count of a root node is zero, we
249 * don't need that node or its right subtree.
250 *
251 * flist_nalloc() counts the number of allocated fds in the tree, by starting
252 * at the top of the tree and summing the right-subtree allocation counts as
253 * it descends leftwards.
254 *
255 * Note: we assume that flist_grow() will keep fip->fi_nfiles of the form
256 * 2^n - 1. This ensures that the fd trees are always full, which saves
257 * quite a bit of boundary checking.
258 */
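
/*
 * Illustrative sketch, not part of the original code: relations (C1a),
 * (L1a) and (R1a) above reduce to one-line expressions. The helper names
 * below (fd_csize, fd_lparent, fd_rparent) are hypothetical and exist only
 * to make the bit tricks concrete; fd_find() and fd_reserve() below
 * open-code the same expressions directly.
 */
static inline int
fd_csize(int fd)	/* (C1a): fd itself plus its right subtree */
{
	return (fd ^ (fd | (fd + 1)));
}

static inline int
fd_lparent(int fd)	/* (L1a): nearest left ancestor, or -1 if none */
{
	return ((fd & (fd + 1)) - 1);
}

static inline int
fd_rparent(int fd)	/* (R1a): nearest right ancestor */
{
	return (fd | (fd + 1));
}

/*
 * Worked example for a 7-entry list (fi_nfiles = 2^3 - 1), whose tree is
 * rooted at fd 3:
 *
 *	fd_csize(3) = 4    fd_lparent(3) = -1    fd_rparent(3) = 7
 *	fd_csize(5) = 2    fd_lparent(5) =  3    fd_rparent(5) = 7
 *	fd_csize(0) = 1    fd_lparent(0) = -1    fd_rparent(0) = 1
 *
 * An fd_rparent() value >= fi_nfiles or an fd_lparent() value of -1 means
 * "no such ancestor", which is exactly how the loops below terminate.
 */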
259 static int
260 fd_find(uf_info_t *fip, int minfd)
261 {
262 int size, ralloc, fd;
263
264 ASSERT(MUTEX_HELD(&fip->fi_lock));
265 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
266
267 for (fd = minfd; (uint_t)fd < fip->fi_nfiles; fd |= fd + 1) {
268 size = fd ^ (fd | (fd + 1));
269 if (fip->fi_list[fd].uf_alloc == size)
270 continue;
271 for (ralloc = 0, size >>= 1; size != 0; size >>= 1) {
272 ralloc += fip->fi_list[fd + size].uf_alloc;
273 if (fip->fi_list[fd].uf_alloc == ralloc + size) {
274 fd += size;
275 ralloc = 0;
276 }
277 }
278 return (fd);
279 }
280 return (-1);
281 }
282
283 static void
284 fd_reserve(uf_info_t *fip, int fd, int incr)
285 {
286 int pfd;
287 uf_entry_t *ufp = &fip->fi_list[fd];
288
289 ASSERT((uint_t)fd < fip->fi_nfiles);
290 ASSERT((ufp->uf_busy == 0 && incr == 1) ||
291 (ufp->uf_busy == 1 && incr == -1));
292 ASSERT(MUTEX_HELD(&ufp->uf_lock));
293 ASSERT(MUTEX_HELD(&fip->fi_lock));
294
295 for (pfd = fd; pfd >= 0; pfd = (pfd & (pfd + 1)) - 1)
296 fip->fi_list[pfd].uf_alloc += incr;
297
298 ufp->uf_busy += incr;
299 }
300
301 static int
302 flist_minsize(uf_info_t *fip)
303 {
304 int fd;
305
306 /*
307 * We'd like to ASSERT(MUTEX_HELD(&fip->fi_lock)), but we're called
308 * by flist_fork(), which relies on other mechanisms for mutual
309 * exclusion.
310 */
311 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
312
313 for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
314 if (fip->fi_list[fd >> 1].uf_alloc != 0)
315 break;
316
317 return (fd);
318 }
319
320 static int
321 flist_nalloc(uf_info_t *fip)
322 {
323 int fd;
324 int nalloc = 0;
325
326 ASSERT(MUTEX_HELD(&fip->fi_lock));
327 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
328
329 for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
330 nalloc += fip->fi_list[fd >> 1].uf_alloc;
331
332 return (nalloc);
333 }
334
335 /*
336 * Increase size of the fi_list array to accommodate at least maxfd.
337 * We keep the size of the form 2^n - 1 for benefit of fd_find().
338 */
339 static void
340 flist_grow(int maxfd)
341 {
342 uf_info_t *fip = P_FINFO(curproc);
343 int newcnt, oldcnt;
344 uf_entry_t *src, *dst, *newlist, *oldlist, *newend, *oldend;
345 uf_rlist_t *urp;
346
347 for (newcnt = 1; newcnt <= maxfd; newcnt = (newcnt << 1) | 1)
348 continue;
349
350 newlist = kmem_zalloc(newcnt * sizeof (uf_entry_t), KM_SLEEP);
351
352 mutex_enter(&fip->fi_lock);
353 oldcnt = fip->fi_nfiles;
354 if (newcnt <= oldcnt) {
355 mutex_exit(&fip->fi_lock);
356 kmem_free(newlist, newcnt * sizeof (uf_entry_t));
357 return;
358 }
359 ASSERT((newcnt & (newcnt + 1)) == 0);
360 oldlist = fip->fi_list;
361 oldend = oldlist + oldcnt;
362 newend = newlist + oldcnt; /* no need to lock beyond old end */
363
364 /*
365 * fi_list and fi_nfiles cannot change while any uf_lock is held,
366 * so we must grab all the old locks *and* the new locks up to oldcnt.
367 * (Locks beyond the end of oldcnt aren't visible until we store
368 * the new fi_nfiles, which is the last thing we do before dropping
369 * all the locks, so there's no need to acquire these locks).
370 * Holding the new locks is necessary because when fi_list changes
371 * to point to the new list, fi_nfiles won't have been stored yet.
372 * If we *didn't* hold the new locks, someone doing a UF_ENTER()
373 * could see the new fi_list, grab the new uf_lock, and then see
374 * fi_nfiles change while the lock is held -- in violation of
375 * UF_ENTER() semantics.
376 */
377 for (src = oldlist; src < oldend; src++)
378 mutex_enter(&src->uf_lock);
379
380 for (dst = newlist; dst < newend; dst++)
381 mutex_enter(&dst->uf_lock);
382
383 for (src = oldlist, dst = newlist; src < oldend; src++, dst++) {
384 dst->uf_file = src->uf_file;
385 dst->uf_fpollinfo = src->uf_fpollinfo;
386 dst->uf_refcnt = src->uf_refcnt;
387 dst->uf_alloc = src->uf_alloc;
388 dst->uf_flag = src->uf_flag;
389 dst->uf_busy = src->uf_busy;
390 dst->uf_portfd = src->uf_portfd;
391 dst->uf_gen = src->uf_gen;
392 }
393
394 /*
395 * As soon as we store the new flist, future locking operations
396 * will use it. Therefore, we must ensure that all the state
397 * we've just established reaches global visibility before the
398 * new flist does.
399 */
400 membar_producer();
401 fip->fi_list = newlist;
402
403 /*
404 * Routines like getf() make an optimistic check on the validity
405 * of the supplied file descriptor: if it's less than the current
406 * value of fi_nfiles -- examined without any locks -- then it's
407 * safe to attempt a UF_ENTER() on that fd (which is a valid
408 * assumption because fi_nfiles only increases). Therefore, it
409 * is critical that the new value of fi_nfiles not reach global
410 * visibility until after the new fi_list: if it happened the
411 * other way around, getf() could see the new fi_nfiles and attempt
412 * a UF_ENTER() on the old fi_list, which would write beyond its
413 * end if the fd exceeded the old fi_nfiles.
414 */
415 membar_producer();
416 fip->fi_nfiles = newcnt;
417
418 /*
419 * The new state is consistent now, so we can drop all the locks.
420 */
421 for (dst = newlist; dst < newend; dst++)
422 mutex_exit(&dst->uf_lock);
423
424 for (src = oldlist; src < oldend; src++) {
425 /*
426 * If any threads are blocked on the old cvs, wake them.
427 * This will force them to wake up, discover that fi_list
428 * has changed, and go back to sleep on the new cvs.
429 */
430 cv_broadcast(&src->uf_wanted_cv);
431 cv_broadcast(&src->uf_closing_cv);
432 mutex_exit(&src->uf_lock);
433 }
434
435 mutex_exit(&fip->fi_lock);
436
437 /*
438 * Retire the old flist. We can't actually kmem_free() it now
439 * because someone may still have a pointer to it. Instead,
440 * we link it onto a list of retired flists. The new flist
441 * is at least double the size of the previous flist, so the
442 * total size of all retired flists will be less than the size
443 * of the current one (to prove, consider the sum of a geometric
444 * series in powers of 2). exit() frees the retired flists.
445 */
446 urp = kmem_zalloc(sizeof (uf_rlist_t), KM_SLEEP);
447 urp->ur_list = oldlist;
448 urp->ur_nfiles = oldcnt;
449
450 mutex_enter(&fip->fi_lock);
451 urp->ur_next = fip->fi_rlist;
452 fip->fi_rlist = urp;
453 mutex_exit(&fip->fi_lock);
454 }
455
456 /*
457 * Utility functions for keeping track of the active file descriptors.
458 */
459 void
460 clear_stale_fd() /* called from post_syscall() */
461 {
462 afd_t *afd = &curthread->t_activefd;
463 int i;
464
465 /* uninitialized is ok here, a_nfd is then zero */
466 for (i = 0; i < afd->a_nfd; i++) {
467 /* assert that this should not be necessary */
468 ASSERT(afd->a_fd[i] == -1);
469 afd->a_fd[i] = -1;
470 }
471 afd->a_stale = 0;
472 }
473
474 void
475 free_afd(afd_t *afd) /* called below and from thread_free() */
476 {
477 int i;
478
479 /* free the buffer if it was kmem_alloc()ed */
480 if (afd->a_nfd > sizeof (afd->a_buf) / sizeof (afd->a_buf[0])) {
481 COUNT(afd_free);
482 kmem_free(afd->a_fd, afd->a_nfd * sizeof (afd->a_fd[0]));
483 }
484
485 /* (re)initialize the structure */
486 afd->a_fd = &afd->a_buf[0];
487 afd->a_nfd = sizeof (afd->a_buf) / sizeof (afd->a_buf[0]);
488 afd->a_stale = 0;
489 for (i = 0; i < afd->a_nfd; i++)
490 afd->a_fd[i] = -1;
491 }
492
493 static void
494 set_active_fd(int fd)
495 {
496 afd_t *afd = &curthread->t_activefd;
497 int i;
498 int *old_fd;
499 int old_nfd;
500 int *new_fd;
501 int new_nfd;
502
503 if (afd->a_nfd == 0) { /* first time initialization */
504 ASSERT(fd == -1);
505 mutex_enter(&afd->a_fdlock);
506 free_afd(afd);
507 mutex_exit(&afd->a_fdlock);
508 }
509
510 /* insert fd into vacant slot, if any */
511 for (i = 0; i < afd->a_nfd; i++) {
512 if (afd->a_fd[i] == -1) {
513 afd->a_fd[i] = fd;
514 return;
515 }
516 }
517
518 /*
519 * Reallocate the a_fd[] array to add one more slot.
520 */
521 ASSERT(fd == -1);
522 old_nfd = afd->a_nfd;
523 old_fd = afd->a_fd;
524 new_nfd = old_nfd + 1;
525 new_fd = kmem_alloc(new_nfd * sizeof (afd->a_fd[0]), KM_SLEEP);
526 MAXFD(new_nfd);
527 COUNT(afd_alloc);
528
529 mutex_enter(&afd->a_fdlock);
530 afd->a_fd = new_fd;
531 afd->a_nfd = new_nfd;
532 for (i = 0; i < old_nfd; i++)
533 afd->a_fd[i] = old_fd[i];
534 afd->a_fd[i] = fd;
535 mutex_exit(&afd->a_fdlock);
536
537 if (old_nfd > sizeof (afd->a_buf) / sizeof (afd->a_buf[0])) {
538 COUNT(afd_free);
539 kmem_free(old_fd, old_nfd * sizeof (afd->a_fd[0]));
540 }
541 }
542
543 void
544 clear_active_fd(int fd) /* called below and from aio.c */
545 {
546 afd_t *afd = &curthread->t_activefd;
547 int i;
548
549 for (i = 0; i < afd->a_nfd; i++) {
550 if (afd->a_fd[i] == fd) {
551 afd->a_fd[i] = -1;
552 break;
553 }
554 }
555 ASSERT(i < afd->a_nfd); /* not found is not ok */
556 }
557
558 /*
559 * Does this thread have this fd active?
560 */
561 static int
562 is_active_fd(kthread_t *t, int fd)
563 {
564 afd_t *afd = &t->t_activefd;
565 int i;
566
567 ASSERT(t != curthread);
568 mutex_enter(&afd->a_fdlock);
569 /* uninitialized is ok here, a_nfd is then zero */
570 for (i = 0; i < afd->a_nfd; i++) {
571 if (afd->a_fd[i] == fd) {
572 mutex_exit(&afd->a_fdlock);
573 return (1);
574 }
575 }
576 mutex_exit(&afd->a_fdlock);
577 return (0);
578 }
579
580 /*
581 * Convert a user supplied file descriptor into a pointer to a file structure.
582 * The only task here is to check the range of the descriptor (the soft
583 * resource limit was enforced at open time and need not be checked here).
584 */
585 file_t *
586 getf_gen(int fd, uf_entry_gen_t *genp)
587 {
588 uf_info_t *fip = P_FINFO(curproc);
589 uf_entry_t *ufp;
590 file_t *fp;
591
592 if ((uint_t)fd >= fip->fi_nfiles)
593 return (NULL);
594
595 /*
596 * Reserve a slot in the active fd array now so we can call
597 * set_active_fd(fd) for real below, while still inside UF_ENTER().
598 */
599 set_active_fd(-1);
600
601 UF_ENTER(ufp, fip, fd);
602
603 if ((fp = ufp->uf_file) == NULL) {
604 UF_EXIT(ufp);
605
606 if (fd == fip->fi_badfd && fip->fi_action > 0)
607 tsignal(curthread, fip->fi_action);
608
609 return (NULL);
610 }
611 ufp->uf_refcnt++;
612 if (genp != NULL) {
613 *genp = ufp->uf_gen;
614 }
615
616 set_active_fd(fd); /* record the active file descriptor */
617
618 UF_EXIT(ufp);
619
620 return (fp);
621 }
622
623 file_t *
624 getf(int fd)
625 {
626 return (getf_gen(fd, NULL));
627 }
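
/*
 * Hedged usage sketch, not part of the original file: callers bracket their
 * use of the returned file_t with getf()/releasef() so that uf_refcnt stays
 * non-zero (and closeandsetf() waits) while the file is in use. The function
 * name and the simplistic error handling below are purely illustrative.
 */
static int
example_getf_user(int fd)
{
	file_t *fp;

	if ((fp = getf(fd)) == NULL)
		return (EBADF);

	/* ... operate on fp and fp->f_vnode while the reference is held ... */

	releasef(fd);
	return (0);
}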
628
629 /*
630 * Close whatever file currently occupies the file descriptor slot
631 * and install the new file, usually NULL, in the file descriptor slot.
632 * The close must complete before we release the file descriptor slot.
633 * If newfp != NULL we only return an error if we can't allocate the
634 * slot so the caller knows that it needs to free the filep;
635 * in the other cases we return the error number from closef().
636 */
637 int
638 closeandsetf(int fd, file_t *newfp)
639 {
640 proc_t *p = curproc;
641 uf_info_t *fip = P_FINFO(p);
642 uf_entry_t *ufp;
643 file_t *fp;
644 fpollinfo_t *fpip;
645 portfd_t *pfd;
646 int error;
647
648 if ((uint_t)fd >= fip->fi_nfiles) {
649 if (newfp == NULL)
650 return (EBADF);
651 flist_grow(fd);
652 }
653
654 if (newfp != NULL) {
655 /*
656 * If ufp is reserved but has no file pointer, it's in the
657 * transition between ufalloc() and setf(). We must wait
658 * for this transition to complete before assigning the
659 * new non-NULL file pointer.
660 */
661 mutex_enter(&fip->fi_lock);
662 if (fd == fip->fi_badfd) {
663 mutex_exit(&fip->fi_lock);
664 if (fip->fi_action > 0)
665 tsignal(curthread, fip->fi_action);
666 return (EBADF);
667 }
668 UF_ENTER(ufp, fip, fd);
669 while (ufp->uf_busy && ufp->uf_file == NULL) {
670 mutex_exit(&fip->fi_lock);
671 cv_wait_stop(&ufp->uf_wanted_cv, &ufp->uf_lock, 250);
672 UF_EXIT(ufp);
673 mutex_enter(&fip->fi_lock);
674 UF_ENTER(ufp, fip, fd);
675 }
676 if ((fp = ufp->uf_file) == NULL) {
677 ASSERT(ufp->uf_fpollinfo == NULL);
678 ASSERT(ufp->uf_flag == 0);
679 fd_reserve(fip, fd, 1);
680 ufp->uf_file = newfp;
681 ufp->uf_gen++;
682 UF_EXIT(ufp);
683 mutex_exit(&fip->fi_lock);
684 return (0);
685 }
686 mutex_exit(&fip->fi_lock);
687 } else {
688 UF_ENTER(ufp, fip, fd);
689 if ((fp = ufp->uf_file) == NULL) {
690 UF_EXIT(ufp);
691 return (EBADF);
692 }
693 }
694
695 ASSERT(ufp->uf_busy);
696 ufp->uf_file = NULL;
697 ufp->uf_flag = 0;
698
699 /*
700 * If the file descriptor reference count is non-zero, then
701 * some other lwp in the process is performing system call
702 * activity on the file. To avoid blocking here for a long
703 * time (the other lwp might be in a long term sleep in its
704 * system call), we scan all other lwps in the process to
705 * find the ones with this fd as one of their active fds,
706 * set their a_stale flag, and set them running if they
707 * are in an interruptible sleep so they will emerge from
708 * their system calls immediately. post_syscall() will
709 * test the a_stale flag and set errno to EBADF.
710 */
711 ASSERT(ufp->uf_refcnt == 0 || p->p_lwpcnt > 1);
712 if (ufp->uf_refcnt > 0) {
713 kthread_t *t;
714
715 /*
716 * We call sprlock_proc(p) to ensure that the thread
717 * list will not change while we are scanning it.
718 * To do this, we must drop ufp->uf_lock and then
719 * reacquire it (so we are not holding both p->p_lock
720 * and ufp->uf_lock at the same time). ufp->uf_lock
721 * must be held for is_active_fd() to be correct
722 * (set_active_fd() is called while holding ufp->uf_lock).
723 *
724 * This is a convoluted dance, but it is better than
725 * the old brute-force method of stopping every thread
726 * in the process by calling holdlwps(SHOLDFORK1).
727 */
728
729 UF_EXIT(ufp);
730 COUNT(afd_wait);
731
732 mutex_enter(&p->p_lock);
733 sprlock_proc(p);
734 mutex_exit(&p->p_lock);
735
736 UF_ENTER(ufp, fip, fd);
737 ASSERT(ufp->uf_file == NULL);
738
739 if (ufp->uf_refcnt > 0) {
740 for (t = curthread->t_forw;
741 t != curthread;
742 t = t->t_forw) {
743 if (is_active_fd(t, fd)) {
744 thread_lock(t);
745 t->t_activefd.a_stale = 1;
746 t->t_post_sys = 1;
747 if (ISWAKEABLE(t))
748 setrun_locked(t);
749 thread_unlock(t);
750 }
751 }
752 }
753
754 UF_EXIT(ufp);
755
756 mutex_enter(&p->p_lock);
757 sprunlock(p);
758
759 UF_ENTER(ufp, fip, fd);
760 ASSERT(ufp->uf_file == NULL);
761 }
762
763 /*
764 * Wait for other lwps to stop using this file descriptor.
765 */
766 while (ufp->uf_refcnt > 0) {
767 cv_wait_stop(&ufp->uf_closing_cv, &ufp->uf_lock, 250);
768 /*
769 * cv_wait_stop() drops ufp->uf_lock, so the file list
770 * can change. Drop the lock on our (possibly) stale
771 * ufp and let UF_ENTER() find and lock the current ufp.
772 */
773 UF_EXIT(ufp);
774 UF_ENTER(ufp, fip, fd);
775 }
776
777 #ifdef DEBUG
778 /*
779 * catch a watchfd on device's pollhead list but not on fpollinfo list
780 */
781 if (ufp->uf_fpollinfo != NULL)
782 checkwfdlist(fp->f_vnode, ufp->uf_fpollinfo);
783 #endif /* DEBUG */
784
785 /*
786 * We may need to cleanup some cached poll states in t_pollstate
787 * before the fd can be reused. It is important that we don't
788 * access a stale thread structure. We will do the cleanup in two
789 * phases to avoid deadlock and holding uf_lock for too long.
790 * In phase 1, hold the uf_lock and call pollblockexit() to set
791 * state in t_pollstate struct so that a thread does not exit on
792 * us. In phase 2, we drop the uf_lock and call pollcacheclean().
793 */
794 pfd = ufp->uf_portfd;
795 ufp->uf_portfd = NULL;
796 fpip = ufp->uf_fpollinfo;
797 ufp->uf_fpollinfo = NULL;
798 if (fpip != NULL)
799 pollblockexit(fpip);
800 UF_EXIT(ufp);
801 if (fpip != NULL)
802 pollcacheclean(fpip, fd);
803 if (pfd)
804 port_close_fd(pfd);
805
806 /*
807 * Keep the file descriptor entry reserved across the closef().
808 */
809 error = closef(fp);
810
811 setf(fd, newfp);
812
813 /* Only return closef() error when closing is all we do */
814 return (newfp == NULL ? error : 0);
815 }
816
817 /*
818 * Decrement uf_refcnt; wakeup anyone waiting to close the file.
819 */
820 void
821 releasef(int fd)
822 {
823 uf_info_t *fip = P_FINFO(curproc);
824 uf_entry_t *ufp;
825
826 UF_ENTER(ufp, fip, fd);
827 ASSERT(ufp->uf_refcnt > 0);
828 clear_active_fd(fd); /* clear the active file descriptor */
829 if (--ufp->uf_refcnt == 0)
830 cv_broadcast(&ufp->uf_closing_cv);
831 UF_EXIT(ufp);
832 }
833
834 /*
835 * Identical to releasef() but can be called from another process.
836 */
837 void
838 areleasef(int fd, uf_info_t *fip)
839 {
840 uf_entry_t *ufp;
841
842 UF_ENTER(ufp, fip, fd);
843 ASSERT(ufp->uf_refcnt > 0);
844 if (--ufp->uf_refcnt == 0)
845 cv_broadcast(&ufp->uf_closing_cv);
846 UF_EXIT(ufp);
847 }
848
849 /*
850 * Duplicate all file descriptors across a fork.
851 */
852 void
853 flist_fork(uf_info_t *pfip, uf_info_t *cfip)
854 {
855 int fd, nfiles;
856 uf_entry_t *pufp, *cufp;
857
858 mutex_init(&cfip->fi_lock, NULL, MUTEX_DEFAULT, NULL);
859 cfip->fi_rlist = NULL;
860
861 /*
862 * We don't need to hold fi_lock because all other lwp's in the
863 * parent have been held.
864 */
865 cfip->fi_nfiles = nfiles = flist_minsize(pfip);
866
867 cfip->fi_list = nfiles == 0 ? NULL :
868 kmem_zalloc(nfiles * sizeof (uf_entry_t), KM_SLEEP);
869
870 for (fd = 0, pufp = pfip->fi_list, cufp = cfip->fi_list; fd < nfiles;
871 fd++, pufp++, cufp++) {
872 boolean_t unreserve = B_FALSE;
873
874 /*
875 * Check to see if FD_CLOFORK is set. In this case we 'close'
876 * the file descriptor by simply not duplicating it and leaving
877 * this entry as an empty descriptor. While we don't need to
878 * close the underlying file_t, we do need to make sure we take
879 * care of cleaning up our reservation. We do not reset the
880 * generation either, simulating a setf here.
881 */
882 if ((pufp->uf_flag & FD_CLOFORK) == 0) {
883 cufp->uf_file = pufp->uf_file;
884 cufp->uf_flag = pufp->uf_flag;
885 }
886 cufp->uf_busy = pufp->uf_busy;
887 cufp->uf_alloc = pufp->uf_alloc;
888 cufp->uf_gen = pufp->uf_gen;
889
890 /*
891 * We may have to clean up our allocation tracking. This happens
892 * either because the slot has no file but is still marked busy
893 * (mid-transition between ufalloc() and setf()), or because it had
894 * a file and FD_CLOFORK is set. If there is no file and the slot
895 * is not busy, the unreserve was already taken care of.
896 */
897 if (pufp->uf_file == NULL) {
898 ASSERT3U(pufp->uf_flag, ==, 0);
899 if (pufp->uf_busy) {
900 unreserve = B_TRUE;
901 }
902 } else if ((pufp->uf_flag & FD_CLOFORK) != 0) {
903 ASSERT3P(pufp->uf_file, !=, NULL);
904 unreserve = B_TRUE;
905 }
906
907 if (unreserve) {
908 /*
909 * Grab locks to appease ASSERTs in fd_reserve
910 */
911 mutex_enter(&cfip->fi_lock);
912 mutex_enter(&cufp->uf_lock);
913 fd_reserve(cfip, fd, -1);
914 mutex_exit(&cufp->uf_lock);
915 mutex_exit(&cfip->fi_lock);
916 }
917 }
918 }
919
920 /*
921 * Close all open file descriptors for the current process.
922 * This is only called from exit(), which is single-threaded,
923 * so we don't need any locking.
924 */
925 void
926 closeall(uf_info_t *fip)
927 {
928 int fd;
929 file_t *fp;
930 uf_entry_t *ufp;
931
932 ufp = fip->fi_list;
933 for (fd = 0; fd < fip->fi_nfiles; fd++, ufp++) {
934 if ((fp = ufp->uf_file) != NULL) {
935 ufp->uf_file = NULL;
936 if (ufp->uf_portfd != NULL) {
937 portfd_t *pfd;
938 /* remove event port association */
939 pfd = ufp->uf_portfd;
940 ufp->uf_portfd = NULL;
941 port_close_fd(pfd);
942 }
943 ASSERT(ufp->uf_fpollinfo == NULL);
944 (void) closef(fp);
945 }
946 }
947
948 kmem_free(fip->fi_list, fip->fi_nfiles * sizeof (uf_entry_t));
949 fip->fi_list = NULL;
950 fip->fi_nfiles = 0;
951 while (fip->fi_rlist != NULL) {
952 uf_rlist_t *urp = fip->fi_rlist;
953 fip->fi_rlist = urp->ur_next;
954 kmem_free(urp->ur_list, urp->ur_nfiles * sizeof (uf_entry_t));
955 kmem_free(urp, sizeof (uf_rlist_t));
956 }
957 }
958
959 /*
960 * Internal form of close. Decrement reference count on file
961 * structure. Decrement reference count on the vnode following
962 * removal of the referencing file structure.
963 */
964 int
965 closef(file_t *fp)
966 {
967 vnode_t *vp;
968 int error;
969 int count;
970 int flag;
971 offset_t offset;
972
973 /*
974 * audit close of file (may be exit)
975 */
976 if (AU_AUDITING())
977 audit_closef(fp);
978 ASSERT(MUTEX_NOT_HELD(&P_FINFO(curproc)->fi_lock));
979
980 mutex_enter(&fp->f_tlock);
981
982 ASSERT(fp->f_count > 0);
983
984 count = fp->f_count--;
985 flag = fp->f_flag;
986 offset = fp->f_offset;
987
988 vp = fp->f_vnode;
989
990 error = VOP_CLOSE(vp, flag, count, offset, fp->f_cred, NULL);
991
992 if (count > 1) {
993 mutex_exit(&fp->f_tlock);
994 return (error);
995 }
996 ASSERT(fp->f_count == 0);
997 /* Last reference, remove any OFD style lock for the file_t */
998 ofdcleanlock(fp);
999 mutex_exit(&fp->f_tlock);
1000
1001 /*
1002 * If DTrace has getf() subroutines active, it will set dtrace_closef
1003 * to point to code that implements a barrier with respect to probe
1004 * context. This must be called before the file_t is freed (and the
1005 * vnode that it refers to is released) -- but it must be after the
1006 * file_t has been removed from the uf_entry_t. That is, there must
1007 * be no way for a racing getf() in probe context to yield the fp that
1008 * we're operating upon.
1009 */
1010 if (dtrace_closef != NULL)
1011 (*dtrace_closef)();
1012
1013 VN_RELE(vp);
1014 /*
1015 * deallocate resources to audit_data
1016 */
1017 if (audit_active)
1018 audit_unfalloc(fp);
1019 crfree(fp->f_cred);
1020 kmem_cache_free(file_cache, fp);
1021 return (error);
1022 }
1023
1024 /*
1025 * This is a combination of ufalloc() and setf().
1026 */
1027 int
1028 ufalloc_file(int start, file_t *fp)
1029 {
1030 proc_t *p = curproc;
1031 uf_info_t *fip = P_FINFO(p);
1032 int filelimit;
1033 uf_entry_t *ufp;
1034 int nfiles;
1035 int fd;
1036
1037 /*
1038 * The assertion ensures that p_fno_ctl fits in an int, so the
1039 * following assignment to filelimit is correct after the cast.
1040 */
1041 ASSERT(p->p_fno_ctl <= INT_MAX);
1042 filelimit = (int)p->p_fno_ctl;
1043
1044 for (;;) {
1045 mutex_enter(&fip->fi_lock);
1046 fd = fd_find(fip, start);
1047 if (fd >= 0 && fd == fip->fi_badfd) {
1048 start = fd + 1;
1049 mutex_exit(&fip->fi_lock);
1050 continue;
1051 }
1052 if ((uint_t)fd < filelimit)
1053 break;
1054 if (fd >= filelimit) {
1055 mutex_exit(&fip->fi_lock);
1056 mutex_enter(&p->p_lock);
1057 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
1058 p->p_rctls, p, RCA_SAFE);
1059 mutex_exit(&p->p_lock);
1060 return (-1);
1061 }
1062 /* fd_find() returned -1 */
1063 nfiles = fip->fi_nfiles;
1064 mutex_exit(&fip->fi_lock);
1065 flist_grow(MAX(start, nfiles));
1066 }
1067
1068 UF_ENTER(ufp, fip, fd);
1069 fd_reserve(fip, fd, 1);
1070 ASSERT(ufp->uf_file == NULL);
1071 ufp->uf_file = fp;
1072 if (fp != NULL) {
1073 ufp->uf_gen++;
1074 }
1075 UF_EXIT(ufp);
1076 mutex_exit(&fip->fi_lock);
1077 return (fd);
1078 }
1079
1080 /*
1081 * Allocate a user file descriptor greater than or equal to "start".
1082 */
1083 int
1084 ufalloc(int start)
1085 {
1086 return (ufalloc_file(start, NULL));
1087 }
1088
1089 /*
1090 * Check that a future allocation of count fds on proc p has a good
1091 * chance of succeeding. If not, do rctl processing as if we'd failed
1092 * the allocation.
1093 *
1094 * Our caller must guarantee that p cannot disappear underneath us.
1095 */
1096 int
1097 ufcanalloc(proc_t *p, uint_t count)
1098 {
1099 uf_info_t *fip = P_FINFO(p);
1100 int filelimit;
1101 int current;
1102
1103 if (count == 0)
1104 return (1);
1105
1106 ASSERT(p->p_fno_ctl <= INT_MAX);
1107 filelimit = (int)p->p_fno_ctl;
1108
1109 mutex_enter(&fip->fi_lock);
1110 current = flist_nalloc(fip); /* # of in-use descriptors */
1111 mutex_exit(&fip->fi_lock);
1112
1113 /*
1114 * If count is a positive integer, the worst that can happen is
1115 * an overflow to a negative value, which is caught by the >= 0 check.
1116 */
1117 current += count;
1118 if (count <= INT_MAX && current >= 0 && current <= filelimit)
1119 return (1);
1120
1121 mutex_enter(&p->p_lock);
1122 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
1123 p->p_rctls, p, RCA_SAFE);
1124 mutex_exit(&p->p_lock);
1125 return (0);
1126 }
1127
1128 /*
1129 * Allocate a user file descriptor and a file structure.
1130 * Initialize the descriptor to point at the file structure.
1131 * If fdp is NULL, the user file descriptor will not be allocated.
1132 */
1133 int
1134 falloc(vnode_t *vp, int flag, file_t **fpp, int *fdp)
1135 {
1136 file_t *fp;
1137 int fd;
1138
1139 if (fdp) {
1140 if ((fd = ufalloc(0)) == -1)
1141 return (EMFILE);
1142 }
1143 fp = kmem_cache_alloc(file_cache, KM_SLEEP);
1144 /*
1145 * Note: falloc returns the fp locked
1146 */
1147 mutex_enter(&fp->f_tlock);
1148 fp->f_count = 1;
1149 fp->f_flag = (ushort_t)flag;
1150 fp->f_flag2 = (flag & (FSEARCH|FEXEC)) >> 16;
1151 fp->f_vnode = vp;
1152 fp->f_offset = 0;
1153 fp->f_audit_data = 0;
1154 crhold(fp->f_cred = CRED());
1155 /*
1156 * allocate resources to audit_data
1157 */
1158 if (audit_active)
1159 audit_falloc(fp);
1160 *fpp = fp;
1161 if (fdp)
1162 *fdp = fd;
1163 return (0);
1164 }
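
/*
 * Hedged usage sketch, not part of the original file: a typical creator of a
 * new descriptor calls falloc() to get both the fd and the locked file_t,
 * finishes initializing the file_t, drops f_tlock, and then publishes the
 * file with setf(). The function name below is hypothetical; compare
 * fassign() later in this file, which follows the same pattern.
 */
static int
example_falloc_user(vnode_t *vp, int flag, int *fdp)
{
	file_t *fp;
	int fd;
	int error;

	if ((error = falloc(vp, flag, &fp, &fd)) != 0)
		return (error);		/* EMFILE if no fd was available */

	/* falloc() returned fp with f_tlock held; release it */
	mutex_exit(&fp->f_tlock);

	setf(fd, fp);			/* fill in the slot falloc() reserved */
	*fdp = fd;
	return (0);
}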
1165
1166 /*ARGSUSED*/
1167 static int
1168 file_cache_constructor(void *buf, void *cdrarg, int kmflags)
1169 {
1170 file_t *fp = buf;
1171
1172 mutex_init(&fp->f_tlock, NULL, MUTEX_DEFAULT, NULL);
1173 return (0);
1174 }
1175
1176 /*ARGSUSED*/
1177 static void
1178 file_cache_destructor(void *buf, void *cdrarg)
1179 {
1180 file_t *fp = buf;
1181
1182 mutex_destroy(&fp->f_tlock);
1183 }
1184
1185 void
1186 finit()
1187 {
1188 file_cache = kmem_cache_create("file_cache", sizeof (file_t), 0,
1189 file_cache_constructor, file_cache_destructor, NULL, NULL, NULL, 0);
1190 }
1191
1192 void
1193 unfalloc(file_t *fp)
1194 {
1195 ASSERT(MUTEX_HELD(&fp->f_tlock));
1196 if (--fp->f_count <= 0) {
1197 /*
1198 * deallocate resources to audit_data
1199 */
1200 if (audit_active)
1201 audit_unfalloc(fp);
1202 crfree(fp->f_cred);
1203 mutex_exit(&fp->f_tlock);
1204 kmem_cache_free(file_cache, fp);
1205 } else
1206 mutex_exit(&fp->f_tlock);
1207 }
1208
1209 /*
1210 * Given a file descriptor, set the user's
1211 * file pointer to the given parameter.
1212 */
1213 void
1214 setf(int fd, file_t *fp)
1215 {
1216 uf_info_t *fip = P_FINFO(curproc);
1217 uf_entry_t *ufp;
1218
1219 if (AU_AUDITING())
1220 audit_setf(fp, fd);
1221
1222 if (fp == NULL) {
1223 mutex_enter(&fip->fi_lock);
1224 UF_ENTER(ufp, fip, fd);
1225 fd_reserve(fip, fd, -1);
1226 mutex_exit(&fip->fi_lock);
1227 } else {
1228 UF_ENTER(ufp, fip, fd);
1229 ASSERT(ufp->uf_busy);
1230 ufp->uf_gen++;
1231 }
1232 ASSERT(ufp->uf_fpollinfo == NULL);
1233 ASSERT(ufp->uf_flag == 0);
1234 ufp->uf_file = fp;
1235 cv_broadcast(&ufp->uf_wanted_cv);
1236 UF_EXIT(ufp);
1237 }
1238
1239 /*
1240 * Given a file descriptor, return the file table flags, plus,
1241 * if this is a socket in asynchronous mode, the FASYNC flag.
1242 * getf() may or may not have been called before calling f_getfl().
1243 */
1244 int
1245 f_getfl(int fd, int *flagp)
1246 {
1247 uf_info_t *fip = P_FINFO(curproc);
1248 uf_entry_t *ufp;
1249 file_t *fp;
1250 int error;
1251
1252 if ((uint_t)fd >= fip->fi_nfiles)
1253 error = EBADF;
1254 else {
1255 UF_ENTER(ufp, fip, fd);
1256 if ((fp = ufp->uf_file) == NULL)
1257 error = EBADF;
1258 else {
1259 vnode_t *vp = fp->f_vnode;
1260 int flag = fp->f_flag | (fp->f_flag2 << 16);
1261
1262 /*
1263 * BSD fcntl() FASYNC compatibility.
1264 */
1265 if (vp->v_type == VSOCK)
1266 flag |= sock_getfasync(vp);
1267 *flagp = flag;
1268 error = 0;
1269 }
1270 UF_EXIT(ufp);
1271 }
1272
1273 return (error);
1274 }
1275
1276 /*
1277 * Given a file descriptor, return the user's file flags.
1278 * Force the FD_CLOEXEC flag for writable self-open /proc files.
1279 * getf() may or may not have been called before calling f_getfd_error().
1280 */
1281 int
1282 f_getfd_error(int fd, int *flagp)
1283 {
1284 uf_info_t *fip = P_FINFO(curproc);
1285 uf_entry_t *ufp;
1286 file_t *fp;
1287 int flag;
1288 int error;
1289
1290 if ((uint_t)fd >= fip->fi_nfiles)
1291 error = EBADF;
1292 else {
1293 UF_ENTER(ufp, fip, fd);
1294 if ((fp = ufp->uf_file) == NULL) {
1295 error = EBADF;
1296 } else {
1297 flag = ufp->uf_flag;
1298 if ((fp->f_flag & FWRITE) && pr_isself(fp->f_vnode))
1299 flag |= FD_CLOEXEC;
1300 *flagp = flag;
1301 error = 0;
1302 }
1303 UF_EXIT(ufp);
1304 }
1305
1306 return (error);
1307 }
1308
1309 /*
1310 * getf() must have been called before calling f_getfd().
1311 */
1312 char
1313 f_getfd(int fd)
1314 {
1315 int flag = 0;
1316 (void) f_getfd_error(fd, &flag);
1317 return ((char)flag);
1318 }
1319
1320 /*
1321 * Given a file descriptor and file flags, set the user's file flags.
1322 * At present, the only valid flags are FD_CLOEXEC and FD_CLOFORK.
1323 * getf() may or may not have been called before calling f_setfd_error().
1324 */
1325 static int
1326 f_setfd_int(int fd, int flags, bool or)
1327 {
1328 uf_info_t *fip = P_FINFO(curproc);
1329 uf_entry_t *ufp;
1330 int error;
1331
1332 if ((uint_t)fd >= fip->fi_nfiles) {
1333 error = EBADF;
1334 } else {
1335 UF_ENTER(ufp, fip, fd);
1336 if (ufp->uf_file == NULL) {
1337 error = EBADF;
1338 } else {
1339 flags &= (FD_CLOEXEC | FD_CLOFORK);
1340 if (or) {
1341 ufp->uf_flag |= flags;
1342 } else {
1343 ufp->uf_flag = flags;
1344 }
1345 error = 0;
1346 }
1347 UF_EXIT(ufp);
1348 }
1349 return (error);
1350 }
1351
1352 int
1353 f_setfd_error(int fd, int flags)
1354 {
1355 return (f_setfd_int(fd, flags, false));
1356 }
1357
1358 void
1359 f_setfd_or(int fd, short flags)
1360 {
1361 (void) f_setfd_int(fd, flags, true);
1362 }
1363
1364 #define BADFD_MIN 3
1365 #define BADFD_MAX 255
1366
1367 /*
1368 * Attempt to allocate a file descriptor which is bad and which
1369 * is "poison" to the application. It cannot be closed (except
1370 * on exec), allocated for a different use, etc.
1371 */
1372 int
1373 f_badfd(int start, int *fdp, int action)
1374 {
1375 int fdr;
1376 int badfd;
1377 uf_info_t *fip = P_FINFO(curproc);
1378
1379 #ifdef _LP64
1380 /* No restrictions on 64 bit _file */
1381 if (get_udatamodel() != DATAMODEL_ILP32)
1382 return (EINVAL);
1383 #endif
1384
1385 if (start > BADFD_MAX || start < BADFD_MIN)
1386 return (EINVAL);
1387
1388 if (action >= NSIG || action < 0)
1389 return (EINVAL);
1390
1391 mutex_enter(&fip->fi_lock);
1392 badfd = fip->fi_badfd;
1393 mutex_exit(&fip->fi_lock);
1394
1395 if (badfd != -1)
1396 return (EAGAIN);
1397
1398 fdr = ufalloc(start);
1399
1400 if (fdr > BADFD_MAX) {
1401 setf(fdr, NULL);
1402 return (EMFILE);
1403 }
1404 if (fdr < 0)
1405 return (EMFILE);
1406
1407 mutex_enter(&fip->fi_lock);
1408 if (fip->fi_badfd != -1) {
1409 /* Lost race */
1410 mutex_exit(&fip->fi_lock);
1411 setf(fdr, NULL);
1412 return (EAGAIN);
1413 }
1414 fip->fi_action = action;
1415 fip->fi_badfd = fdr;
1416 mutex_exit(&fip->fi_lock);
1417 setf(fdr, NULL);
1418
1419 *fdp = fdr;
1420
1421 return (0);
1422 }
1423
1424 /*
1425 * Allocate a file descriptor and assign it to the vnode "*vpp",
1426 * performing the usual open protocol upon it and returning the
1427 * file descriptor allocated. It is the responsibility of the
1428 * caller to dispose of "*vpp" if any error occurs.
1429 */
1430 int
1431 fassign(vnode_t **vpp, int mode, int *fdp)
1432 {
1433 file_t *fp;
1434 int error;
1435 int fd;
1436
1437 if (error = falloc((vnode_t *)NULL, mode, &fp, &fd))
1438 return (error);
1439 if (error = VOP_OPEN(vpp, mode, fp->f_cred, NULL)) {
1440 setf(fd, NULL);
1441 unfalloc(fp);
1442 return (error);
1443 }
1444 fp->f_vnode = *vpp;
1445 mutex_exit(&fp->f_tlock);
1446 /*
1447 * Fill in the slot falloc reserved.
1448 */
1449 setf(fd, fp);
1450 *fdp = fd;
1451 return (0);
1452 }
1453
1454 /*
1455 * When a process forks it must increment the f_count of all file pointers
1456 * since there is a new process pointing at them. fcnt_add(fip, 1) does this.
1457 * Since we are called when there is only 1 active lwp we don't need to
1458 * hold fi_lock or any uf_lock. If the fork fails, fork_fail() calls
1459 * fcnt_add(fip, -1) to restore the counts.
1460 */
1461 void
1462 fcnt_add(uf_info_t *fip, int incr)
1463 {
1464 int i;
1465 uf_entry_t *ufp;
1466 file_t *fp;
1467
1468 ufp = fip->fi_list;
1469 for (i = 0; i < fip->fi_nfiles; i++, ufp++) {
1470 if ((fp = ufp->uf_file) != NULL) {
1471 mutex_enter(&fp->f_tlock);
1472 ASSERT((incr == 1 && fp->f_count >= 1) ||
1473 (incr == -1 && fp->f_count >= 2));
1474 fp->f_count += incr;
1475 mutex_exit(&fp->f_tlock);
1476 }
1477 }
1478 }
1479
1480 /*
1481 * This is called from exec to close all fd's that have the FD_CLOEXEC flag
1482 * set and also to close all self-open for write /proc file descriptors.
1483 */
1484 void
1485 close_exec(uf_info_t *fip)
1486 {
1487 int fd;
1488 file_t *fp;
1489 fpollinfo_t *fpip;
1490 uf_entry_t *ufp;
1491 portfd_t *pfd;
1492
1493 ufp = fip->fi_list;
1494 for (fd = 0; fd < fip->fi_nfiles; fd++, ufp++) {
1495 if ((fp = ufp->uf_file) != NULL &&
1496 ((ufp->uf_flag & FD_CLOEXEC) ||
1497 ((fp->f_flag & FWRITE) && pr_isself(fp->f_vnode)))) {
1498 fpip = ufp->uf_fpollinfo;
1499 mutex_enter(&fip->fi_lock);
1500 mutex_enter(&ufp->uf_lock);
1501 fd_reserve(fip, fd, -1);
1502 mutex_exit(&fip->fi_lock);
1503 ufp->uf_file = NULL;
1504 ufp->uf_fpollinfo = NULL;
1505 ufp->uf_flag = 0;
1506 /*
1507 * We may need to cleanup some cached poll states
1508 * in t_pollstate before the fd can be reused. It
1509 * is important that we don't access a stale thread
1510 * structure. We will do the cleanup in two
1511 * phases to avoid deadlock and holding uf_lock for
1512 * too long. In phase 1, hold the uf_lock and call
1513 * pollblockexit() to set state in t_pollstate struct
1514 * so that a thread does not exit on us. In phase 2,
1515 * we drop the uf_lock and call pollcacheclean().
1516 */
1517 pfd = ufp->uf_portfd;
1518 ufp->uf_portfd = NULL;
1519 if (fpip != NULL)
1520 pollblockexit(fpip);
1521 mutex_exit(&ufp->uf_lock);
1522 if (fpip != NULL)
1523 pollcacheclean(fpip, fd);
1524 if (pfd)
1525 port_close_fd(pfd);
1526 (void) closef(fp);
1527 }
1528 }
1529
1530 /* Reset bad fd */
1531 fip->fi_badfd = -1;
1532 fip->fi_action = -1;
1533 }
1534
1535 /*
1536 * Utility function called by most of the *at() system call interfaces.
1537 *
1538 * Generate a starting vnode pointer for an (fd, path) pair where 'fd'
1539 * is an open file descriptor for a directory to be used as the starting
1540 * point for the lookup of the relative pathname 'path' (or, if path is
1541 * NULL, generate a vnode pointer for the direct target of the operation).
1542 *
1543 * If we successfully return a non-NULL startvp, it has been the target
1544 * of VN_HOLD() and the caller must call VN_RELE() on it.
1545 */
1546 int
1547 fgetstartvp(int fd, char *path, vnode_t **startvpp)
1548 {
1549 vnode_t *startvp;
1550 file_t *startfp;
1551 char startchar;
1552
1553 if (fd == AT_FDCWD && path == NULL)
1554 return (EFAULT);
1555
1556 if (fd == AT_FDCWD) {
1557 /*
1558 * Start from the current working directory.
1559 */
1560 startvp = NULL;
1561 } else {
1562 if (path == NULL)
1563 startchar = '\0';
1564 else if (copyin(path, &startchar, sizeof (char)))
1565 return (EFAULT);
1566
1567 if (startchar == '/') {
1568 /*
1569 * 'path' is an absolute pathname.
1570 */
1571 startvp = NULL;
1572 } else {
1573 /*
1574 * 'path' is a relative pathname or we will
1575 * be applying the operation to 'fd' itself.
1576 */
1577 if ((startfp = getf(fd)) == NULL)
1578 return (EBADF);
1579 startvp = startfp->f_vnode;
1580 VN_HOLD(startvp);
1581 releasef(fd);
1582 }
1583 }
1584 *startvpp = startvp;
1585 return (0);
1586 }
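
/*
 * Hedged usage sketch, not part of the original file: the common shape of an
 * *at() routine built on fgetstartvp(). The function name is hypothetical
 * and, for brevity, the sketch assumes path != NULL; compare fsetattrat()
 * below, which also handles the path == NULL case.
 */
static int
example_at_lookup(int fd, char *path, vnode_t **vpp)
{
	vnode_t *startvp;
	int error;

	if ((error = fgetstartvp(fd, path, &startvp)) != 0)
		return (error);

	error = lookupnameat(path, UIO_USERSPACE, FOLLOW, NULLVPP, vpp,
	    startvp);

	if (startvp != NULL)
		VN_RELE(startvp);

	return (error);
}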
1587
1588 /*
1589 * Called from fchownat() and fchmodat() to set ownership and mode.
1590 * The contents of *vap must be set before calling here.
1591 */
1592 int
1593 fsetattrat(int fd, char *path, int flags, struct vattr *vap)
1594 {
1595 vnode_t *startvp;
1596 vnode_t *vp;
1597 int error;
1598
1599 /*
1600 * Since we are never called to set the size of a file, we don't
1601 * need to check for non-blocking locks (via nbl_need_check(vp)).
1602 */
1603 ASSERT(!(vap->va_mask & AT_SIZE));
1604
1605 if ((error = fgetstartvp(fd, path, &startvp)) != 0)
1606 return (error);
1607 if (AU_AUDITING() && startvp != NULL)
1608 audit_setfsat_path(1);
1609
1610 /*
1611 * Do lookup for fchownat/fchmodat when path not NULL
1612 */
1613 if (path != NULL) {
1614 if (error = lookupnameat(path, UIO_USERSPACE,
1615 (flags == AT_SYMLINK_NOFOLLOW) ?
1616 NO_FOLLOW : FOLLOW,
1617 NULLVPP, &vp, startvp)) {
1618 if (startvp != NULL)
1619 VN_RELE(startvp);
1620 return (error);
1621 }
1622 } else {
1623 vp = startvp;
1624 ASSERT(vp);
1625 VN_HOLD(vp);
1626 }
1627
1628 if (vp->v_type == VLNK && (vap->va_mask & AT_MODE) != 0) {
1629 error = EOPNOTSUPP;
1630 } else if (vn_is_readonly(vp)) {
1631 error = EROFS;
1632 } else {
1633 error = VOP_SETATTR(vp, vap, 0, CRED(), NULL);
1634 }
1635
1636 if (startvp != NULL)
1637 VN_RELE(startvp);
1638 VN_RELE(vp);
1639
1640 return (error);
1641 }
1642
1643 /*
1644 * Return true if the given vnode is referenced by any
1645 * entry in the current process's file descriptor table.
1646 */
1647 int
1648 fisopen(vnode_t *vp)
1649 {
1650 int fd;
1651 file_t *fp;
1652 vnode_t *ovp;
1653 uf_info_t *fip = P_FINFO(curproc);
1654 uf_entry_t *ufp;
1655
1656 mutex_enter(&fip->fi_lock);
1657 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1658 UF_ENTER(ufp, fip, fd);
1659 if ((fp = ufp->uf_file) != NULL &&
1660 (ovp = fp->f_vnode) != NULL && VN_CMP(vp, ovp)) {
1661 UF_EXIT(ufp);
1662 mutex_exit(&fip->fi_lock);
1663 return (1);
1664 }
1665 UF_EXIT(ufp);
1666 }
1667 mutex_exit(&fip->fi_lock);
1668 return (0);
1669 }
1670
1671 /*
1672 * Return zero if at least one file currently open (by curproc) shouldn't be
1673 * allowed to change zones.
1674 */
1675 int
1676 files_can_change_zones(void)
1677 {
1678 int fd;
1679 file_t *fp;
1680 uf_info_t *fip = P_FINFO(curproc);
1681 uf_entry_t *ufp;
1682
1683 mutex_enter(&fip->fi_lock);
1684 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1685 UF_ENTER(ufp, fip, fd);
1686 if ((fp = ufp->uf_file) != NULL &&
1687 !vn_can_change_zones(fp->f_vnode)) {
1688 UF_EXIT(ufp);
1689 mutex_exit(&fip->fi_lock);
1690 return (0);
1691 }
1692 UF_EXIT(ufp);
1693 }
1694 mutex_exit(&fip->fi_lock);
1695 return (1);
1696 }
1697
1698 #ifdef DEBUG
1699
1700 /*
1701 * The following functions are only used in ASSERT()s elsewhere.
1702 * They do not modify the state of the system.
1703 */
1704
1705 /*
1706 * Return true (1) if the current thread is in the fpollinfo
1707 * list for this file descriptor, else false (0).
1708 */
1709 static int
1710 curthread_in_plist(uf_entry_t *ufp)
1711 {
1712 fpollinfo_t *fpip;
1713
1714 ASSERT(MUTEX_HELD(&ufp->uf_lock));
1715 for (fpip = ufp->uf_fpollinfo; fpip; fpip = fpip->fp_next)
1716 if (fpip->fp_thread == curthread)
1717 return (1);
1718 return (0);
1719 }
1720
1721 /*
1722 * Sanity check to make sure that after lwp_exit(),
1723 * curthread does not appear on any fd's fpollinfo list.
1724 */
1725 void
1726 checkfpollinfo(void)
1727 {
1728 int fd;
1729 uf_info_t *fip = P_FINFO(curproc);
1730 uf_entry_t *ufp;
1731
1732 mutex_enter(&fip->fi_lock);
1733 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1734 UF_ENTER(ufp, fip, fd);
1735 ASSERT(!curthread_in_plist(ufp));
1736 UF_EXIT(ufp);
1737 }
1738 mutex_exit(&fip->fi_lock);
1739 }
1740
1741 /*
1742 * Return true (1) if the current thread is in the fpollinfo
1743 * list for this file descriptor, else false (0).
1744 * This is the same as curthread_in_plist(),
1745 * but is called w/o holding uf_lock.
1746 */
1747 int
1748 infpollinfo(int fd)
1749 {
1750 uf_info_t *fip = P_FINFO(curproc);
1751 uf_entry_t *ufp;
1752 int rc;
1753
1754 UF_ENTER(ufp, fip, fd);
1755 rc = curthread_in_plist(ufp);
1756 UF_EXIT(ufp);
1757 return (rc);
1758 }
1759
1760 #endif /* DEBUG */
1761
1762 /*
1763 * Add the curthread to fpollinfo list, meaning this fd is currently in the
1764 * thread's poll cache. Each lwp polling this file descriptor should call
1765 * this routine once.
1766 */
1767 void
1768 addfpollinfo(int fd)
1769 {
1770 struct uf_entry *ufp;
1771 fpollinfo_t *fpip;
1772 uf_info_t *fip = P_FINFO(curproc);
1773
1774 fpip = kmem_zalloc(sizeof (fpollinfo_t), KM_SLEEP);
1775 fpip->fp_thread = curthread;
1776 UF_ENTER(ufp, fip, fd);
1777 /*
1778 * Assert we are not already on the list, that is, that
1779 * this lwp did not call addfpollinfo twice for the same fd.
1780 */
1781 ASSERT(!curthread_in_plist(ufp));
1782 /*
1783 * addfpollinfo is always done inside the getf/releasef pair.
1784 */
1785 ASSERT(ufp->uf_refcnt >= 1);
1786 fpip->fp_next = ufp->uf_fpollinfo;
1787 ufp->uf_fpollinfo = fpip;
1788 UF_EXIT(ufp);
1789 }
1790
1791 /*
1792 * Delete curthread from fpollinfo list if it is there.
1793 */
1794 void
1795 delfpollinfo(int fd)
1796 {
1797 struct uf_entry *ufp;
1798 struct fpollinfo *fpip;
1799 struct fpollinfo **fpipp;
1800 uf_info_t *fip = P_FINFO(curproc);
1801
1802 UF_ENTER(ufp, fip, fd);
1803 for (fpipp = &ufp->uf_fpollinfo;
1804 (fpip = *fpipp) != NULL;
1805 fpipp = &fpip->fp_next) {
1806 if (fpip->fp_thread == curthread) {
1807 *fpipp = fpip->fp_next;
1808 kmem_free(fpip, sizeof (fpollinfo_t));
1809 break;
1810 }
1811 }
1812 /*
1813 * Assert that we are not still on the list, that is, that
1814 * this lwp did not call addfpollinfo twice for the same fd.
1815 */
1816 ASSERT(!curthread_in_plist(ufp));
1817 UF_EXIT(ufp);
1818 }
1819
1820 /*
1821 * fd is associated with a port. pfd is a pointer to the fd entry in the
1822 * cache of the port.
1823 */
1824
1825 void
1826 addfd_port(int fd, portfd_t *pfd)
1827 {
1828 struct uf_entry *ufp;
1829 uf_info_t *fip = P_FINFO(curproc);
1830
1831 UF_ENTER(ufp, fip, fd);
1832 /*
1833 * addfd_port is always done inside the getf/releasef pair.
1834 */
1835 ASSERT(ufp->uf_refcnt >= 1);
1836 if (ufp->uf_portfd == NULL) {
1837 /* first entry */
1838 ufp->uf_portfd = pfd;
1839 pfd->pfd_next = NULL;
1840 } else {
1841 pfd->pfd_next = ufp->uf_portfd;
1842 ufp->uf_portfd = pfd;
1843 pfd->pfd_next->pfd_prev = pfd;
1844 }
1845 UF_EXIT(ufp);
1846 }
1847
1848 void
1849 delfd_port(int fd, portfd_t *pfd)
1850 {
1851 struct uf_entry *ufp;
1852 uf_info_t *fip = P_FINFO(curproc);
1853
1854 UF_ENTER(ufp, fip, fd);
1855 /*
1856 * delfd_port is always done inside the getf/releasef pair.
1857 */
1858 ASSERT(ufp->uf_refcnt >= 1);
1859 if (ufp->uf_portfd == pfd) {
1860 /* remove first entry */
1861 ufp->uf_portfd = pfd->pfd_next;
1862 } else {
1863 pfd->pfd_prev->pfd_next = pfd->pfd_next;
1864 if (pfd->pfd_next != NULL)
1865 pfd->pfd_next->pfd_prev = pfd->pfd_prev;
1866 }
1867 UF_EXIT(ufp);
1868 }
1869
1870 static void
1871 port_close_fd(portfd_t *pfd)
1872 {
1873 portfd_t *pfdn;
1874
1875 /*
1876 * At this point, no other thread should access
1877 * the portfd_t list for this fd. The uf_file, uf_portfd
1878 * pointers in the uf_entry_t struct for this fd have
1879 * already been set to NULL by the caller.
1880 */
1881 for (; pfd != NULL; pfd = pfdn) {
1882 pfdn = pfd->pfd_next;
1883 port_close_pfd(pfd);
1884 }
1885 }
1886