/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 Robert Mustacchi
 * Copyright 2025 Hans Rosenfeld
 */

/* Copyright (c) 1988 AT&T */
/* All Rights Reserved */
#include "lint.h"
#include "mtlib.h"
#include "file64.h"
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/debug.h>
#include <limits.h>

#define	_iob	__iob

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)	FILE *fp
#define	FIRSTFP(lp, fp)	fp = lp->iobp
#define	NEXTFP(fp)	fp++
#define	FPLOCK(fp)	(&fp->_lock)
#define	FPSTATE(fp)	(&fp->_state)

#define	xFILE	FILE

#else

#define	FPDECL(fp)	FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)	x##fp = lp->iobp; \
	fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)	(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)	(x##fp ? \
	&x##fp->xlock : &_xftab[IOPIND(fp)]._lock)
#define	FPSTATE(fp)	(x##fp ? \
	&x##fp->xstate : &_xftab[IOPIND(fp)]._state)

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE	_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic	_xdat._magic
#define	xend	_xdat._end
#define	xlock	_xdat._lock
#define	xstate	_xdat._state

#define	FILEx(fp)	((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC	0x63687367
#define	XMAGIC(xfp)	((uintptr_t)(xfp) ^ XFILEMAGIC)

#endif /* _LP64 */
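
/*
 * Worked example of the macros above (illustrative): in an ILP32 process,
 * FPDECL(fp) declares both "FILE *fp" and "xFILE *xfp", and NEXTFP(fp)
 * advances whichever representation is in use, so one loop body serves
 * both the static _iob[]/_xftab[] tables (xfp == NULL) and malloc'd xFILE
 * arrays.  For the magic number: an xFILE at a hypothetical address
 * 0x08061040 stores 0x08061040 ^ 0x63687367 == 0x6b6e6327 in xmagic, so
 * a stray word that merely equals a plain pointer value is unlikely to
 * pass the VALIDXFILE() check defined further down.
 */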

struct _link_	/* manages a list of streams */
{
	xFILE	*iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32-bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[]
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32-bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated; see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks(void)
{
	(void) mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks(void)
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow the compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (cancel_safe_mutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					cancel_safe_mutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef struct {
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {			/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {			/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * Lock to make the testing of fp->_flag == 0 and the acquiring of
	 * the fp atomic, and for the allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse
	 * locking.  For finer granularity, use fp->_lock for allocating an
	 * iop and make the testing of lp->next and the allocation of a new
	 * link atomic using lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					cancel_safe_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we overallocate 1 FILE for each
	 * 32 chunks.  More overallocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
	if (fcloses > nchunks/32)
		goto rescan;
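
	/*
	 * For example (illustrative numbers): once nchunks has reached 64,
	 * nchunks/32 == 2, so a failed scan falls through to allocation
	 * unless more than two fcloses have happened since the last pass
	 * started at __first_link.
	 */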

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			cancel_safe_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the
	 * possibility of a SIGSEGV if a non-stdio-issued FILE structure
	 * ends on a page boundary.  We run this check so we never need an
	 * expensive check like mincore() in order to know whether it is
	 * safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other layout
	 * will be, because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (i.e., the 0x1000 boundary lies inside the
	 *	just-allocated structure), and that distance modulo the size
	 *	of xFILE is identical to the offset of the first xmagic in
	 *	the structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reversed structure.
	 */
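	/*
	 * Illustration: suppose pkgp were placed so that the page boundary
	 * fell exactly on Pkgn.iob[3].xmagic.  Both tests above would then
	 * hold, and with the normal layout VALIDXFILE() would reject that
	 * genuine xFILE, since its xmagic would sit at a 0xXXXXX000
	 * address.  The reversed layout moves hdr from the front to the
	 * back, shifting every xmagic offset by (roughly) sizeof (struct
	 * _link_) and thus off the page boundary.
	 */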
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
	    offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
	    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		(void) mutex_init(&fp[i]._lock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		(void) mutex_init(&xfp[i].xlock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	}
#endif /* _LP64 */

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	/* claim the fp by setting low 8 bits */
	fp->_flag = _DEF_FLAG_MASK;
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (fp);
}

static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int fd, save_errno;

	save_errno = errno;

	/*
	 * Non-FILE based STREAMS are required to declare their own
	 * seekability and therefore we should not try to test them below.
	 */
	fd = _get_fd(iop);
	if (fd == -1) {
		return;
	}
	if (fstat64(fd, &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, we'll
		 * do the old behaviour and flush
		 * the stream.
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * Check for what is non-SEEKABLE;
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream.
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64-bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary.  We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))
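
/*
 * Illustration: the address test rejects any candidate whose xmagic field
 * would sit at the start of a 4K page, so the subsequent read of xmagic
 * can never touch a page beyond the one holding the candidate (xmagic is
 * word aligned, hence at most at offset 0xffc within its page).  Only
 * then is the stored value compared against the scrambled pointer from
 * XMAGIC().
 */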

static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken: fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;
}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better
 * performance by inlining the STDIOP check in IOB_LCK and inlining a
 * custom version of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/* _LP64 */

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		while ((num_wrote = _xwrite(iop, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				if (!cancel_active())
					iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377 -- _DEF_FLAG_MASK, or alloc in
		 * progress), we also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (cancel_safe_mutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * We don't need to worry about the _IORW
				 * case since the iop will also be marked
				 * with _IOREAD or _IOWRT, whichever we are
				 * really doing.
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * Flush seekable read buffers;
					 * don't flush non-seekable read
					 * buffers.
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				cancel_safe_mutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
		(void) _xseek64(iop, -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* helper for fclose/fdclose/fcloseall */
static int
fclose_helper(FILE *iop, boolean_t doclose)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}

	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) &&
	    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);

	if (doclose)
		if (_xclose(iop) < 0)
			res = EOF;

	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}

	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;		/* marks it as available */
	FUNLOCKFILE(lk);

	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;

	res = fclose_helper(iop, B_TRUE);

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (res);
}

/*
 * fdclose() works like fclose(), except it doesn't close the underlying file
 * descriptor.
 *
 * That is, however, not true for streams which aren't backed by a file
 * descriptor, such as memory streams, as indicated by them having a special
 * ops vector, which we infer from the file descriptor being -1.  In this
 * case fdclose() returns EOF, sets errno to ENOTSUP, but still closes the
 * FILE just like fclose() would.  This is to make sure we're compatible
 * with BSD.
 */
int
fdclose(FILE *iop, int *fdp)
{
	int res = 0;
	int fd = _get_fd(iop);

	if (fd == -1) {
		res = fclose_helper(iop, B_TRUE);
		errno = ENOTSUP;
	} else {
		res = fclose_helper(iop, B_FALSE);
	}

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	if (fdp != NULL)
		*fdp = fd;

	if (fd == -1)
		res = EOF;

	return (res);
}

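/*
 * Usage sketch (illustrative, not part of libc): detach a stream from its
 * underlying descriptor so that the fd outlives the FILE:
 *
 *	int fd;
 *	FILE *fp = fopen("/tmp/example", "w");	(hypothetical path)
 *	if (fp != NULL && fdclose(fp, &fd) == 0)
 *		... fd remains open; close(fd) when done ...
 */
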
/* close all open streams */
int
fcloseall(void)
{
	FPDECL(iop);

	struct _link_ *lp;

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		int i;

		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			(void) fclose_helper(iop, B_TRUE);
			fcloses++;
		}
	} while ((lp = lp->next) != NULL);

	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (0);
}

/* flush buffer, close fd, but keep the stream; used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Not unbuffered, and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) &&
	    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (_xclose(iop) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		/* claim the fp by setting low 8 bits */
		fp->_flag = _DEF_FLAG_MASK;
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef _LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, the pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They look up/set large fd's for extended FILE support.
 */

/*
 * A negative value indicates that extended fd FILEs have not
 * been enabled by the user.
 */
static int bad_fd = -1;

int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, then check
	 * whether _file is being changed from underneath us.
	 * It should not be, because if it is then we lose our
	 * ability to guard against silent data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
	return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}


/*
 * Activates extended fd's in FILEs.
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * Search for an available fd and make it the badfd.
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
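
/*
 * Illustrative call (not from this file): a 32-bit application, or the
 * extendedFILE preload library on its behalf, might run
 *
 *	if (enable_extended_FILE_stdio(-1, SIGABRT) == 0)
 *		... stdio may now use fds above the unsigned char limit ...
 *
 * letting the library probe for an available fd to reserve as the badfd.
 */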
#endif

/*
 * Wrappers around the various system calls that stdio needs to make on a file
 * descriptor.
 */
static stdio_ops_t *
get_stdops(FILE *iop)
{
#ifdef	_LP64
	return (iop->_ops);
#else
	struct xFILEdata *dat = getxfdat(iop);
	return (dat->_ops);
#endif
}

static void
set_stdops(FILE *iop, stdio_ops_t *ops)
{
#ifdef	_LP64
	ASSERT3P(iop->_ops, ==, NULL);
	iop->_ops = ops;
#else
	struct xFILEdata *dat = getxfdat(iop);
	ASSERT3P(dat->_ops, ==, NULL);
	dat->_ops = ops;
#endif
}

static void
clr_stdops(FILE *iop)
{
#ifdef	_LP64
	iop->_ops = NULL;
#else
	struct xFILEdata *dat = getxfdat(iop);
	dat->_ops = NULL;
#endif
}

ssize_t
_xread(FILE *iop, void *buf, size_t nbytes)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_read(iop, buf, nbytes));
	}

	return (read(_get_fd(iop), buf, nbytes));
}

ssize_t
_xwrite(FILE *iop, const void *buf, size_t nbytes)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_write(iop, buf, nbytes));
	}

	return (write(_get_fd(iop), buf, nbytes));
}

off_t
_xseek(FILE *iop, off_t off, int whence)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_seek(iop, off, whence));
	}

	return (lseek(_get_fd(iop), off, whence));
}

off64_t
_xseek64(FILE *iop, off64_t off, int whence)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		/*
		 * The internal APIs only operate with an off_t.  An off64_t
		 * in an ILP32 environment may represent a value larger than
		 * they can accept.  As such, we try to catch such cases and
		 * error out before we get there.
		 */
		if (off > LONG_MAX || off < LONG_MIN) {
			errno = EOVERFLOW;
			return (-1);
		}
		return (ops->std_seek(iop, off, whence));
	}

	return (lseek64(_get_fd(iop), off, whence));
}

int
_xclose(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_close(iop));
	}

	return (close(_get_fd(iop)));
}

void *
_xdata(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_data);
	}

	return (NULL);
}

int
_xassoc(FILE *iop, fread_t readf, fwrite_t writef, fseek_t seekf,
    fclose_t closef, void *data)
{
	stdio_ops_t *ops = get_stdops(iop);

	if (ops == NULL) {
		ops = malloc(sizeof (*ops));
		if (ops == NULL) {
			return (-1);
		}
		set_stdops(iop, ops);
	}

	ops->std_read = readf;
	ops->std_write = writef;
	ops->std_seek = seekf;
	ops->std_close = closef;
	ops->std_data = data;

	return (0);
}
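
/*
 * Sketch of how a stream implementation would use _xassoc() (illustrative;
 * my_read etc. are hypothetical callbacks, not defined in libc): after
 * _findiop() hands back a fresh FILE, the caller registers its ops with
 *
 *	if (_xassoc(fp, my_read, my_write, my_seek, my_close, state) != 0)
 *		... allocation failed, bail out ...
 *
 * From then on _xread()/_xwrite()/_xseek()/_xclose() dispatch through the
 * ops vector instead of the (absent) file descriptor, and _xunassoc()
 * must be called before the FILE is released.
 */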

void
_xunassoc(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops == NULL) {
		return;
	}
	clr_stdops(iop);
	free(ops);
}

int
_get_fd(FILE *iop)
{
	/*
	 * Streams with an ops vector (currently the memory stream family)
	 * do not have an underlying file descriptor that we can give back
	 * to the user.  In such cases, return -1 to explicitly make sure
	 * that they'll get an EBADF from things.
	 */
	if (get_stdops(iop) != NULL) {
		return (-1);
	}
#ifdef	_LP64
	return (iop->_file);
#else
	if (iop->__extendedfd) {
		return (_file_get(iop));
	} else {
		return (iop->_magic);
	}
#endif
}