/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 Robert Mustacchi
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#include "lint.h"
#include "mtlib.h"
#include "file64.h"
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <string.h>
#include "stdiom.h"
#include <wchar.h>
#include <sys/stat.h>
#include <stddef.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/debug.h>
#include <limits.h>

#define	_iob	__iob

#undef end

#define	FILE_ARY_SZ	8 /* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state

#define	xFILE			FILE

#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state

/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;		/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)

/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)
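
/*
 * For example (the address is hypothetical), an xFILE at 0x08061000 would
 * store xmagic = 0x08061000 ^ 0x63687367 = 0x6b6e6367; a stale or foreign
 * word at that offset is very unlikely to match the recomputed XMAGIC value.
 */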

#endif /* _LP64 */

struct _link_	/* manages a list of streams */
{
	xFILE *iobp;		/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int	niob;		/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32 bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[],
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32 bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
{
#if !defined(_LP64)
	NULL,
#else
	&_iob[0],
#endif
	_NFILE,
	NULL
};
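
/*
 * Sketch of the resulting list once _findiop() below has allocated one
 * extra chunk (64-bit case):
 *
 *	__first_link { iobp = &_iob[0], niob = _NFILE } --next-->
 *	    { iobp = malloc'd FILE[FILE_ARY_SZ], niob = 8 } --next--> NULL
 */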

/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE* and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated; see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
void
__cleanup(void)		/* called at process end to flush output streams */
{
	(void) fflush(NULL);
}

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
void
stdio_locks()
{
	(void) mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */
}

void
stdio_unlocks()
{
	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) mutex_unlock(&_first_link_lock);
}

void
_flushlbf(void)		/* fflush() all line-buffered streams */
{
	FPDECL(fp);
	int i;
	struct _link_ *lp;
	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;
	do {
		FIRSTFP(lp, fp);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (cancel_safe_mutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT)) {
						(void) _fflush_u(fp);
					}
					cancel_safe_mutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
}

/* allocate an unused stream; NULL if cannot */
FILE *
_findiop(void)
{
	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef _LP64
	typedef	struct	{
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;
		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
	uintptr_t delta;
#endif
	Pkg *pkgp;
	struct _link_ *hdr;
	FPDECL(fp);
	int i;
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}

	lp = lastlink;

	/*
	 * Lock to make testing of fp->_flag == 0 and acquiring the fp
	 * atomic, and for allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse locking.
	 * For finer granularity, use fp->_lock for allocating an iop
	 * and make the testing of lp->next and allocation of new links
	 * atomic using lp->_lock.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					cancel_safe_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);

	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we overallocate one FILE for each
	 * 32 chunks.  More overallocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * overallocate until we reach 256 FILE *s and keeps the performance
	 * pretty close to the optimum.
	 */
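	/*
	 * Concretely: each chunk holds FILE_ARY_SZ (8) FILEs, so nchunks/32
	 * stays 0 until 32 chunks (256 FILEs) exist; until then a single
	 * fclose() makes us rescan rather than allocate a new chunk.
	 */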
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		if (threaded)
			cancel_safe_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef _LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the possibility
	 * of a SIGSEGV if a non-stdio-issued FILE structure ends on a page
	 * boundary.  We run this check so we never need to run an expensive
	 * check like mincore() in order to know whether it is
	 * safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other layout
	 * will be, because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (i.e., the 0x1000 boundary is inside our
	 *	just-allocated structure), and the distance modulo the size
	 *	of xFILE is identical to the offset of the first xmagic in
	 *	the structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reversed structure.
	 */
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
	    offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
	    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif /* _LP64 */

	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		(void) mutex_init(&fp[i]._lock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		(void) mutex_init(&xfp[i].xlock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	}
#endif	/*	_LP64	*/

	lastlink = *prev = hdr;
	fp->_ptr = 0;
	fp->_base = 0;
	/* claim the fp by setting low 8 bits */
	fp->_flag = _DEF_FLAG_MASK;
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (fp);
}

static void
isseekable(FILE *iop)
{
	struct stat64 fstatbuf;
	int fd, save_errno;

	save_errno = errno;

	/*
	 * Non-FILE based STREAMS are required to declare their own
	 * seekability, and therefore we should not try to test them below.
	 */
	fd = _get_fd(iop);
	if (fd == -1) {
		return;
	}
	if (fstat64(fd, &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, we'll
		 * do the old behaviour and flush
		 * the stream.
		 */
		SET_SEEKABLE(iop);
		errno = save_errno;
		return;
	}

	/*
	 * Check for what is non-SEEKABLE;
	 * otherwise assume it's SEEKABLE so we get the old
	 * behaviour and flush the stream.
	 */

	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
		CLEAR_SEEKABLE(iop);
	} else {
		SET_SEEKABLE(iop);
	}

	errno = save_errno;
}
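
/*
 * Seekability matters because _fflush_u() below "flushes" a read buffer by
 * seeking backward over the unconsumed input; _fflush_l_iops() therefore
 * only flushes read buffers on streams marked SEEKABLE here.
 */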

#ifdef	_LP64
void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	iop->_end = end;

	isseekable(iop);
}

#undef _realbufend

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	return (iop->_end);
}

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64 bit environment.
 */
/*
 * xmagic must not be aligned on a 4K boundary. We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))

static struct xFILEdata *
getxfdat(FILE *iop)
{
	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);
	else
		return (NULL);
}

void
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		dat->_end = end;

	isseekable(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken, fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;
}

Uchar *
_realbufend(FILE *iop)		/* get the end pointer for this iop */
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (dat->_end);

	return (NULL);
}

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better performance
 * by inlining the STDIOP check in IOB_LCK and inlining a custom version
 * of getxfdat() here.
 */
rmutex_t *
_reallock(FILE *iop)
{
	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

	return (NULL);
}

#endif	/*	_LP64	*/

/* make sure _cnt, _ptr are correct */
void
_bufsync(FILE *iop, Uchar *bufend)
{
	ssize_t spaceleft;

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
}

/* really write out current buffer contents */
int
_xflsbuf(FILE *iop)
{
	ssize_t n;
	Uchar *base = iop->_base;
	Uchar *bufend;
	ssize_t num_wrote;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	if (n > 0) {
		while ((num_wrote = _xwrite(iop, base, (size_t)n)) != n) {
			if (num_wrote <= 0) {
				if (!cancel_active())
					iop->_flag |= _IOERR;
				return (EOF);
			}
			n -= num_wrote;
			base += num_wrote;
		}
	}
	return (0);
}

/* flush (write) buffer */
int
fflush(FILE *iop)
{
	int res;
	rmutex_t *lk;

	if (iop) {
		FLOCKFILE(lk, iop);
		res = _fflush_u(iop);
		FUNLOCKFILE(lk);
	} else {
		res = _fflush_l_iops();		/* flush all iops */
	}
	return (res);
}

static int
_fflush_l_iops(void)		/* flush all buffers */
{
	FPDECL(iop);

	int i;
	struct _link_ *lp;
	int res = 0;
	rmutex_t *lk;
	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	if (threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377 -- _DEF_FLAG_MASK, or alloc in
		 * progress), we also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			if (threaded) {
				lk = FPLOCK(iop);
				if (cancel_safe_mutex_trylock(lk) != 0)
					continue;
			}

			if (!(iop->_flag & _IONBF)) {
				/*
				 * We don't need to worry about the _IORW case
				 * since the iop will also be marked with
				 * _IOREAD or _IOWRT, whichever we are really
				 * doing.
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * Flush seekable read buffers;
					 * don't flush non-seekable ones.
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			if (threaded)
				cancel_safe_mutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);
	if (threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);
	return (res);
}

/* flush buffer */
int
_fflush_u(FILE *iop)
{
	int res = 0;

	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
		(void) _xseek64(iop, -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}
		return (0);
	}
	if (iop->_base != NULL && iop->_ptr > iop->_base) {
		res = _xflsbuf(iop);
	}
	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
		iop->_cnt = 0;
	}
	return (res);
}

/* flush buffer and close stream */
int
fclose(FILE *iop)
{
	int res = 0;
	rmutex_t *lk;

	if (iop == NULL) {
		return (EOF);		/* avoid passing zero to FLOCKFILE */
	}

	FLOCKFILE(lk, iop);
	if (iop->_flag == 0) {
		FUNLOCKFILE(lk);
		return (EOF);
	}
	/* Not unbuffered and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (_xclose(iop) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	iop->_cnt = 0;
	iop->_flag = 0;			/* marks it as available */
	FUNLOCKFILE(lk);

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (res);
}

/* close all open streams */
int
fcloseall(void)
{
	FPDECL(iop);

	struct _link_ *lp;
	rmutex_t *lk;

	if (__libc_threaded)
		cancel_safe_mutex_lock(&_first_link_lock);

	lp = &__first_link;

	do {
		int i;

		FIRSTFP(lp, iop);
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			/* code stolen from fclose(), above */

			FLOCKFILE(lk, iop);
			if (iop->_flag == 0) {
				FUNLOCKFILE(lk);
				continue;
			}

			/* Not unbuffered and opened for read and/or write? */
			if (!(iop->_flag & _IONBF) &&
			    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
				(void) _fflush_u(iop);
			(void) _xclose(iop);
			if (iop->_flag & _IOMYBUF)
				free((char *)iop->_base - PUSHBACK);
			iop->_base = NULL;
			iop->_ptr = NULL;
			iop->_cnt = 0;
			iop->_flag = 0;		/* marks it as available */
			FUNLOCKFILE(lk);
			fcloses++;
		}
	} while ((lp = lp->next) != NULL);

	if (__libc_threaded)
		cancel_safe_mutex_unlock(&_first_link_lock);

	return (0);
}

/* flush buffer, close fd, but keep the stream; used by freopen() */
int
close_fd(FILE *iop)
{
	int res = 0;
	mbstate_t *mb;

	if (iop == NULL || iop->_flag == 0)
		return (EOF);
	/* Not unbuffered and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) && (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (_xclose(iop) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}
	iop->_base = NULL;
	iop->_ptr = NULL;
	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));
	iop->_cnt = 0;
	_setorientation(iop, _NO_MODE);
	return (res);
}

static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)
{
	if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
		return (NULL);	/* locked: fp in use */

	if (fp->_flag == 0) {	/* unused */
#ifndef	_LP64
		fp->__orientation = 0;
#endif /* _LP64 */
		fp->_cnt = 0;
		fp->_ptr = NULL;
		fp->_base = NULL;
		/* claim the fp by setting low 8 bits */
		fp->_flag = _DEF_FLAG_MASK;
		(void) memset(mb, 0, sizeof (mbstate_t));
		FUNLOCKFILE(lk);
		return (fp);
	}
	FUNLOCKFILE(lk);
	return (NULL);
}

#ifndef	_LP64
/*
 * DESCRIPTION:
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * RETURNS:
 * If the associated mbstate_t is found, the pointer to the mbstate_t is
 * returned.  Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)
{
	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);

	return (NULL);
}

/*
 * More 32-bit only functions.
 * They look up/set large fd's for extended FILE support.
 */

/*
 * A negative value indicates that extended fd FILEs have not
 * been enabled by the user.
 */
static int bad_fd = -1;

int
_file_get(FILE *iop)
{
	int altfd;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, then check
	 * whether _file is being changed from underneath us.
	 * It should not be, because if it is, we lose our ability
	 * to guard against silent data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
	return (altfd);
}

int
_file_set(FILE *iop, int fd, const char *type)
{
	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
	return (0);
}

/*
 * Activates extended fd's in FILE's
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

int
enable_extended_FILE_stdio(int fd, int action)
{
	int i;

	if (action < 0)
		action = SIGABRT;	/* default signal */

	if (fd < 0) {
		/*
		 * search for an available fd and make it the badfd
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}
	bad_fd = fd;
	return (0);
}
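
/*
 * Typical use (a sketch; see the extendedFILE man page): a 32-bit process
 * asks for auto-selection of the reserved fd and the default SIGABRT action:
 *
 *	if (enable_extended_FILE_stdio(-1, -1) != 0)
 *		(the extended FILE facility is unavailable)
 */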
#endif

/*
 * Wrappers around the various system calls that stdio needs to make on a file
 * descriptor.
 */
static stdio_ops_t *
get_stdops(FILE *iop)
{
#ifdef	_LP64
	return (iop->_ops);
#else
	struct xFILEdata *dat = getxfdat(iop);
	return (dat->_ops);
#endif
}

static void
set_stdops(FILE *iop, stdio_ops_t *ops)
{
#ifdef	_LP64
	ASSERT3P(iop->_ops, ==, NULL);
	iop->_ops = ops;
#else
	struct xFILEdata *dat = getxfdat(iop);
	ASSERT3P(dat->_ops, ==, NULL);
	dat->_ops = ops;
#endif
}

static void
clr_stdops(FILE *iop)
{
#ifdef	_LP64
	iop->_ops = NULL;
#else
	struct xFILEdata *dat = getxfdat(iop);
	dat->_ops = NULL;
#endif
}

ssize_t
_xread(FILE *iop, void *buf, size_t nbytes)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_read(iop, buf, nbytes));
	}

	return (read(_get_fd(iop), buf, nbytes));
}

ssize_t
_xwrite(FILE *iop, const void *buf, size_t nbytes)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_write(iop, buf, nbytes));
	}
	return (write(_get_fd(iop), buf, nbytes));
}

off_t
_xseek(FILE *iop, off_t off, int whence)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_seek(iop, off, whence));
	}

	return (lseek(_get_fd(iop), off, whence));
}

off64_t
_xseek64(FILE *iop, off64_t off, int whence)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		/*
		 * The internal APIs only operate with an off_t. An off64_t in
		 * an ILP32 environment may represent a value larger than they
		 * can accept. As such, we try to catch such cases and fail
		 * with EOVERFLOW before we get there.
		 */
		if (off > LONG_MAX || off < LONG_MIN) {
			errno = EOVERFLOW;
			return (-1);
		}
		return (ops->std_seek(iop, off, whence));
	}

	return (lseek64(_get_fd(iop), off, whence));
}

int
_xclose(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_close(iop));
	}

	return (close(_get_fd(iop)));
}

void *
_xdata(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops != NULL) {
		return (ops->std_data);
	}

	return (NULL);
}

int
_xassoc(FILE *iop, fread_t readf, fwrite_t writef, fseek_t seekf,
    fclose_t closef, void *data)
{
	stdio_ops_t *ops = get_stdops(iop);

	if (ops == NULL) {
		ops = malloc(sizeof (*ops));
		if (ops == NULL) {
			return (-1);
		}
		set_stdops(iop, ops);
	}

	ops->std_read = readf;
	ops->std_write = writef;
	ops->std_seek = seekf;
	ops->std_close = closef;
	ops->std_data = data;

	return (0);
}
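
/*
 * A stream backend (e.g., the memory stream family mentioned in _get_fd())
 * registers its callbacks roughly as follows -- a sketch, with mem_read,
 * mem_write, mem_seek, mem_close, and state all hypothetical names:
 *
 *	if (_xassoc(iop, mem_read, mem_write, mem_seek, mem_close,
 *	    state) != 0)
 *		(allocation of the ops vector failed; bail out)
 *
 * From then on _xread(), _xwrite(), _xseek(), and _xclose() dispatch to
 * these callbacks instead of the fd-based system calls.
 */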

void
_xunassoc(FILE *iop)
{
	stdio_ops_t *ops = get_stdops(iop);
	if (ops == NULL) {
		return;
	}
	clr_stdops(iop);
	free(ops);
}

int
_get_fd(FILE *iop)
{
	/*
	 * Streams with an ops vector (currently the memory stream family) do
	 * not have an underlying file descriptor that we can give back to the
	 * user. In such cases, return -1 to explicitly make sure that they'll
	 * get EBADF from anything that uses the result as a file descriptor.
	 */
	if (get_stdops(iop) != NULL) {
		return (-1);
	}
#ifdef  _LP64
	return (iop->_file);
#else
	if (iop->__extendedfd) {
		return (_file_get(iop));
	} else {
		return (iop->_magic);
	}
#endif
}
1131